Diffstat (limited to 'compiler/rustc_const_eval')
37 files changed, 576 insertions, 1978 deletions
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index 20f0f27517f..2dbeb7d5e0c 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -246,11 +246,10 @@ const_eval_offset_from_unsigned_overflow = const_eval_operator_non_const = cannot call non-const operator in {const_eval_const_context}s -const_eval_overflow = - overflow executing `{$name}` - +const_eval_overflow_arith = + arithmetic overflow in `{$intrinsic}` const_eval_overflow_shift = - overflowing shift by {$val} in `{$name}` + overflowing shift by {$shift_amount} in `{$intrinsic}` const_eval_panic = the evaluated program panicked at '{$msg}', {$file}:{$line}:{$col} diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs index c8c54143f61..5fbf5b41109 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/check_consts/check.rs @@ -20,6 +20,8 @@ use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitor}; use std::mem; use std::ops::Deref; +use tracing::{debug, instrument, trace}; + use super::ops::{self, NonConstOp, Status}; use super::qualifs::{self, HasMutInterior, NeedsDrop, NeedsNonConstDrop}; use super::resolver::FlowSensitiveAnalysis; diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs index 308b90cd470..308b90cd470 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs +++ b/compiler/rustc_const_eval/src/check_consts/mod.rs diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/check_consts/ops.rs index 8775685e8c7..90b622cae65 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs +++ b/compiler/rustc_const_eval/src/check_consts/ops.rs @@ -19,6 +19,7 @@ use rustc_session::parse::feature_err; use rustc_span::symbol::sym; use rustc_span::{BytePos, Pos, Span, Symbol}; use rustc_trait_selection::traits::SelectionContext; +use tracing::debug; use super::ConstCx; use crate::errors; @@ -308,7 +309,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { } if let ConstContext::Static(_) = ccx.const_kind() { - err.note("consider wrapping this expression in `Lazy::new(|| ...)` from the `once_cell` crate: https://crates.io/crates/once_cell"); + err.note("consider wrapping this expression in `std::sync::LazyLock::new(|| ...)`"); } err diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs index 5cd13783c23..f0763f1e490 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs +++ b/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs @@ -2,6 +2,7 @@ use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::{self, BasicBlock, Location}; use rustc_middle::ty::{Ty, TyCtxt}; use rustc_span::{symbol::sym, Span}; +use tracing::trace; use super::check::Qualifs; use super::ops::{self, NonConstOp}; diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/check_consts/qualifs.rs index 7e8a208659b..5949444e599 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/check_consts/qualifs.rs @@ -13,6 +13,7 @@ use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty}; use rustc_trait_selection::traits::{ ImplSource, 
Obligation, ObligationCause, ObligationCtxt, SelectionContext, }; +use tracing::{instrument, trace}; use super::ConstCx; diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/check_consts/resolver.rs index 011341472b4..011341472b4 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs +++ b/compiler/rustc_const_eval/src/check_consts/resolver.rs diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs index 94c9f056b30..9a98677a844 100644 --- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs @@ -44,8 +44,8 @@ impl HasStaticRootDefId for DummyMachine { } } -impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { - interpret::compile_time_machine!(<'mir, 'tcx>); +impl<'tcx> interpret::Machine<'tcx> for DummyMachine { + interpret::compile_time_machine!(<'tcx>); type MemoryKind = !; const PANIC_ON_ALLOC_FAIL: bool = true; @@ -53,11 +53,11 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { const ALL_CONSTS_ARE_PRECHECKED: bool = false; #[inline(always)] - fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool { + fn enforce_alignment(_ecx: &InterpCx<'tcx, Self>) -> bool { false // no reason to enforce alignment } - fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool { + fn enforce_validity(_ecx: &InterpCx<'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool { false } @@ -83,26 +83,26 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { } fn find_mir_or_eval_fn( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _instance: ty::Instance<'tcx>, _abi: rustc_target::spec::abi::Abi, _args: &[interpret::FnArg<'tcx, Self::Provenance>], _destination: &interpret::MPlaceTy<'tcx, Self::Provenance>, _target: Option<BasicBlock>, _unwind: UnwindAction, - ) -> interpret::InterpResult<'tcx, Option<(&'mir Body<'tcx>, ty::Instance<'tcx>)>> { + ) -> interpret::InterpResult<'tcx, Option<(&'tcx Body<'tcx>, ty::Instance<'tcx>)>> { unimplemented!() } fn panic_nounwind( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _msg: &str, ) -> interpret::InterpResult<'tcx> { unimplemented!() } fn call_intrinsic( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _instance: ty::Instance<'tcx>, _args: &[interpret::OpTy<'tcx, Self::Provenance>], _destination: &interpret::MPlaceTy<'tcx, Self::Provenance>, @@ -113,7 +113,7 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { } fn assert_panic( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _msg: &rustc_middle::mir::AssertMessage<'tcx>, _unwind: UnwindAction, ) -> interpret::InterpResult<'tcx> { @@ -121,11 +121,11 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { } fn binary_ptr_op( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, bin_op: BinOp, left: &interpret::ImmTy<'tcx, Self::Provenance>, right: &interpret::ImmTy<'tcx, Self::Provenance>, - ) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> { + ) -> interpret::InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> { use rustc_middle::mir::BinOp::*; Ok(match bin_op { Eq | Ne | Lt | Le | Gt | Ge => { @@ -154,7 +154,7 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { Ge => left >= right, _ => 
bug!(), }; - (ImmTy::from_bool(res, *ecx.tcx), false) + ImmTy::from_bool(res, *ecx.tcx) } // Some more operations are possible with atomics. @@ -168,32 +168,30 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine { } fn expose_ptr( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _ptr: interpret::Pointer<Self::Provenance>, ) -> interpret::InterpResult<'tcx> { unimplemented!() } - fn init_frame_extra( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, - _frame: interpret::Frame<'mir, 'tcx, Self::Provenance>, - ) -> interpret::InterpResult< - 'tcx, - interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>, - > { + fn init_frame( + _ecx: &mut InterpCx<'tcx, Self>, + _frame: interpret::Frame<'tcx, Self::Provenance>, + ) -> interpret::InterpResult<'tcx, interpret::Frame<'tcx, Self::Provenance, Self::FrameExtra>> + { unimplemented!() } fn stack<'a>( - _ecx: &'a InterpCx<'mir, 'tcx, Self>, - ) -> &'a [interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] { + _ecx: &'a InterpCx<'tcx, Self>, + ) -> &'a [interpret::Frame<'tcx, Self::Provenance, Self::FrameExtra>] { // Return an empty stack instead of panicking, as `cur_span` uses it to evaluate constants. &[] } fn stack_mut<'a>( - _ecx: &'a mut InterpCx<'mir, 'tcx, Self>, - ) -> &'a mut Vec<interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> { + _ecx: &'a mut InterpCx<'tcx, Self>, + ) -> &'a mut Vec<interpret::Frame<'tcx, Self::Provenance, Self::FrameExtra>> { unimplemented!() } } diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs index 08c9609eacf..923b9ddf9af 100644 --- a/compiler/rustc_const_eval/src/const_eval/error.rs +++ b/compiler/rustc_const_eval/src/const_eval/error.rs @@ -2,7 +2,7 @@ use std::mem; use rustc_errors::{DiagArgName, DiagArgValue, DiagMessage, Diagnostic, IntoDiagArg}; use rustc_hir::CRATE_HIR_ID; -use rustc_middle::mir::interpret::Provenance; +use rustc_middle::mir::interpret::{Provenance, ReportedErrorInfo}; use rustc_middle::mir::AssertKind; use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::TyCtxt; @@ -58,13 +58,10 @@ impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind { } } -pub fn get_span_and_frames<'tcx, 'mir>( +pub fn get_span_and_frames<'tcx>( tcx: TyCtxtAt<'tcx>, - stack: &[Frame<'mir, 'tcx, impl Provenance, impl Sized>], -) -> (Span, Vec<errors::FrameNote>) -where - 'tcx: 'mir, -{ + stack: &[Frame<'tcx, impl Provenance, impl Sized>], +) -> (Span, Vec<errors::FrameNote>) { let mut stacktrace = Frame::generate_stacktrace_from_stack(stack); // Filter out `requires_caller_location` frames. stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*tcx)); @@ -140,7 +137,7 @@ where } err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span), err_inval!(Layout(LayoutError::ReferencesError(guar))) => { - ErrorHandled::Reported(guar.into(), span) + ErrorHandled::Reported(ReportedErrorInfo::tainted_by_errors(guar), span) } // Report remaining errors. _ => { @@ -161,9 +158,9 @@ where /// Emit a lint from a const-eval situation. // Even if this is unused, please don't remove it -- chances are we will need to emit a lint during const-eval again in the future! 
-pub(super) fn lint<'tcx, 'mir, L>( +pub(super) fn lint<'tcx, L>( tcx: TyCtxtAt<'tcx>, - machine: &CompileTimeInterpreter<'mir, 'tcx>, + machine: &CompileTimeInterpreter<'tcx>, lint: &'static rustc_session::lint::Lint, decorator: impl FnOnce(Vec<errors::FrameNote>) -> L, ) where diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 6a9a21bbd8e..36f468d3308 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -1,6 +1,7 @@ use std::sync::atomic::Ordering::Relaxed; use either::{Left, Right}; +use tracing::{debug, instrument, trace}; use rustc_hir::def::DefKind; use rustc_middle::bug; @@ -30,10 +31,10 @@ use crate::CTRL_C_RECEIVED; // Returns a pointer to where the result lives #[instrument(level = "trace", skip(ecx, body))] -fn eval_body_using_ecx<'mir, 'tcx, R: InterpretationResult<'tcx>>( - ecx: &mut CompileTimeEvalContext<'mir, 'tcx>, +fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>( + ecx: &mut CompileTimeEvalContext<'tcx>, cid: GlobalId<'tcx>, - body: &'mir mir::Body<'tcx>, + body: &'tcx mir::Body<'tcx>, ) -> InterpResult<'tcx, R> { trace!(?ecx.param_env); let tcx = *ecx.tcx; @@ -133,12 +134,12 @@ fn eval_body_using_ecx<'mir, 'tcx, R: InterpretationResult<'tcx>>( /// that inform us about the generic bounds of the constant. E.g., using an associated constant /// of a function's generic parameter will require knowledge about the bounds on the generic /// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument. -pub(crate) fn mk_eval_cx_to_read_const_val<'mir, 'tcx>( +pub(crate) fn mk_eval_cx_to_read_const_val<'tcx>( tcx: TyCtxt<'tcx>, root_span: Span, param_env: ty::ParamEnv<'tcx>, can_access_mut_global: CanAccessMutGlobal, -) -> CompileTimeEvalContext<'mir, 'tcx> { +) -> CompileTimeEvalContext<'tcx> { debug!("mk_eval_cx: {:?}", param_env); InterpCx::new( tcx, @@ -150,12 +151,12 @@ pub(crate) fn mk_eval_cx_to_read_const_val<'mir, 'tcx>( /// Create an interpreter context to inspect the given `ConstValue`. /// Returns both the context and an `OpTy` that represents the constant. -pub fn mk_eval_cx_for_const_val<'mir, 'tcx>( +pub fn mk_eval_cx_for_const_val<'tcx>( tcx: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>, val: mir::ConstValue<'tcx>, ty: Ty<'tcx>, -) -> Option<(CompileTimeEvalContext<'mir, 'tcx>, OpTy<'tcx>)> { +) -> Option<(CompileTimeEvalContext<'tcx>, OpTy<'tcx>)> { let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, param_env, CanAccessMutGlobal::No); let op = ecx.const_val_to_op(val, ty, None).ok()?; Some((ecx, op)) @@ -169,7 +170,7 @@ pub fn mk_eval_cx_for_const_val<'mir, 'tcx>( /// encounter an `Indirect` they cannot handle. #[instrument(skip(ecx), level = "debug")] pub(super) fn op_to_const<'tcx>( - ecx: &CompileTimeEvalContext<'_, 'tcx>, + ecx: &CompileTimeEvalContext<'tcx>, op: &OpTy<'tcx>, for_diagnostics: bool, ) -> ConstValue<'tcx> { @@ -325,16 +326,16 @@ pub trait InterpretationResult<'tcx> { /// This function takes the place where the result of the evaluation is stored /// and prepares it for returning it in the appropriate format needed by the specific /// evaluation query. 
- fn make_result<'mir>( + fn make_result( mplace: MPlaceTy<'tcx>, - ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, + ecx: &mut InterpCx<'tcx, CompileTimeInterpreter<'tcx>>, ) -> Self; } impl<'tcx> InterpretationResult<'tcx> for ConstAlloc<'tcx> { - fn make_result<'mir>( + fn make_result( mplace: MPlaceTy<'tcx>, - _ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, + _ecx: &mut InterpCx<'tcx, CompileTimeInterpreter<'tcx>>, ) -> Self { ConstAlloc { alloc_id: mplace.ptr().provenance.unwrap().alloc_id(), ty: mplace.layout.ty } } @@ -415,8 +416,8 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>( } #[inline(always)] -fn const_validate_mplace<'mir, 'tcx>( - ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, +fn const_validate_mplace<'tcx>( + ecx: &InterpCx<'tcx, CompileTimeInterpreter<'tcx>>, mplace: &MPlaceTy<'tcx>, cid: GlobalId<'tcx>, ) -> Result<(), ErrorHandled> { @@ -445,8 +446,8 @@ fn const_validate_mplace<'mir, 'tcx>( } #[inline(always)] -fn report_validation_error<'mir, 'tcx>( - ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, +fn report_validation_error<'tcx>( + ecx: &InterpCx<'tcx, CompileTimeInterpreter<'tcx>>, error: InterpErrorInfo<'tcx>, alloc_id: AllocId, ) -> ErrorHandled { diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs index 8c66888d100..3c11d67e748 100644 --- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs @@ -38,7 +38,6 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness { match node { hir::Node::Ctor(_) | hir::Node::AnonConst(_) - | hir::Node::ConstBlock(_) | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. 
}) => { hir::Constness::Const } @@ -57,6 +56,7 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness { if is_const { hir::Constness::Const } else { hir::Constness::NotConst } } hir::Node::Expr(e) if let hir::ExprKind::Closure(c) = e.kind => c.constness, + hir::Node::Expr(e) if let hir::ExprKind::ConstBlock(_) = e.kind => hir::Constness::Const, _ => { if let Some(fn_kind) = node.fn_kind() { if fn_kind.constness() == hir::Constness::Const { diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index 836e548ae2b..79a161d3f03 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -6,7 +6,6 @@ use std::ops::ControlFlow; use rustc_ast::Mutability; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::fx::IndexEntry; -use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_hir::def_id::LocalDefId; use rustc_hir::LangItem; @@ -21,13 +20,14 @@ use rustc_span::symbol::{sym, Symbol}; use rustc_span::Span; use rustc_target::abi::{Align, Size}; use rustc_target::spec::abi::Abi as CallAbi; +use tracing::debug; use crate::errors::{LongRunning, LongRunningWarn}; use crate::fluent_generated as fluent; use crate::interpret::{ - self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, + self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, throw_unsup, throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal, Frame, - ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar, + GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar, }; use super::error::*; @@ -44,7 +44,7 @@ const TINY_LINT_TERMINATOR_LIMIT: usize = 20; const PROGRESS_INDICATOR_START: usize = 4_000_000; /// Extra machine state for CTFE, and the Machine instance -pub struct CompileTimeInterpreter<'mir, 'tcx> { +pub struct CompileTimeInterpreter<'tcx> { /// The number of terminators that have been evaluated. /// /// This is used to produce lints informing the user that the compiler is not stuck. @@ -52,7 +52,7 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> { pub(super) num_evaluated_steps: usize, /// The virtual call stack. - pub(super) stack: Vec<Frame<'mir, 'tcx>>, + pub(super) stack: Vec<Frame<'tcx>>, /// Pattern matching on consts with references would be unsound if those references /// could point to anything mutable. Therefore, when evaluating consts and when constructing valtrees, @@ -89,7 +89,7 @@ impl From<bool> for CanAccessMutGlobal { } } -impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> { +impl<'tcx> CompileTimeInterpreter<'tcx> { pub(crate) fn new( can_access_mut_global: CanAccessMutGlobal, check_alignment: CheckAlignment, @@ -163,8 +163,7 @@ impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> { } } -pub(crate) type CompileTimeEvalContext<'mir, 'tcx> = - InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>; +pub(crate) type CompileTimeEvalContext<'tcx> = InterpCx<'tcx, CompileTimeInterpreter<'tcx>>; #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum MemoryKind { @@ -196,7 +195,7 @@ impl interpret::MayLeak for ! 
{ } } -impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> { +impl<'tcx> CompileTimeEvalContext<'tcx> { fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) { let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span); let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo()); @@ -370,53 +369,42 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> { } } -impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> { - compile_time_machine!(<'mir, 'tcx>); +impl<'tcx> interpret::Machine<'tcx> for CompileTimeInterpreter<'tcx> { + compile_time_machine!(<'tcx>); type MemoryKind = MemoryKind; const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error #[inline(always)] - fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool { + fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool { matches!(ecx.machine.check_alignment, CheckAlignment::Error) } #[inline(always)] - fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool { + fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool { ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited() } fn load_mir( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, instance: ty::InstanceDef<'tcx>, ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> { match instance { - ty::InstanceDef::Item(def) => { - if ecx.tcx.is_ctfe_mir_available(def) { - Ok(ecx.tcx.mir_for_ctfe(def)) - } else if ecx.tcx.def_kind(def) == DefKind::AssocConst { - ecx.tcx.dcx().bug("This is likely a const item that is missing from its impl"); - } else { - // `find_mir_or_eval_fn` checks that this is a const fn before even calling us, - // so this should be unreachable. - let path = ecx.tcx.def_path_str(def); - bug!("trying to call extern function `{path}` at compile-time"); - } - } + ty::InstanceDef::Item(def) => Ok(ecx.tcx.mir_for_ctfe(def)), _ => Ok(ecx.tcx.instance_mir(instance)), } } fn find_mir_or_eval_fn( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, orig_instance: ty::Instance<'tcx>, _abi: CallAbi, args: &[FnArg<'tcx>], dest: &MPlaceTy<'tcx>, ret: Option<mir::BasicBlock>, _unwind: mir::UnwindAction, // unwinding is not supported in consts - ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> { + ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> { debug!("find_mir_or_eval_fn: {:?}", orig_instance); // Replace some functions. 
@@ -447,7 +435,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance))) } - fn panic_nounwind(ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx> { + fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> { let msg = Symbol::intern(msg); let span = ecx.find_closest_untracked_caller_location(); let (file, line, col) = ecx.location_triple_for_span(span); @@ -455,7 +443,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } fn call_intrinsic( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx>], dest: &MPlaceTy<'tcx, Self::Provenance>, @@ -554,7 +542,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } fn assert_panic( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, msg: &AssertMessage<'tcx>, _unwind: mir::UnwindAction, ) -> InterpResult<'tcx> { @@ -585,15 +573,15 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } fn binary_ptr_op( - _ecx: &InterpCx<'mir, 'tcx, Self>, + _ecx: &InterpCx<'tcx, Self>, _bin_op: mir::BinOp, _left: &ImmTy<'tcx>, _right: &ImmTy<'tcx>, - ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> { + ) -> InterpResult<'tcx, ImmTy<'tcx>> { throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time"); } - fn increment_const_eval_counter(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { + fn increment_const_eval_counter(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> { // The step limit has already been hit in a previous call to `increment_const_eval_counter`. if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) { @@ -649,16 +637,16 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } #[inline(always)] - fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> { + fn expose_ptr(_ecx: &mut InterpCx<'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> { // This is only reachable with -Zunleash-the-miri-inside-of-you. throw_unsup_format!("exposing pointers is not possible at compile-time") } #[inline(always)] - fn init_frame_extra( - ecx: &mut InterpCx<'mir, 'tcx, Self>, - frame: Frame<'mir, 'tcx>, - ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> { + fn init_frame( + ecx: &mut InterpCx<'tcx, Self>, + frame: Frame<'tcx>, + ) -> InterpResult<'tcx, Frame<'tcx>> { // Enforce stack size limit. Add 1 because this is run before the new frame is pushed. 
if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) { throw_exhaust!(StackFrameLimitReached) @@ -669,15 +657,15 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, #[inline(always)] fn stack<'a>( - ecx: &'a InterpCx<'mir, 'tcx, Self>, - ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] { + ecx: &'a InterpCx<'tcx, Self>, + ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] { &ecx.machine.stack } #[inline(always)] fn stack_mut<'a>( - ecx: &'a mut InterpCx<'mir, 'tcx, Self>, - ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> { + ecx: &'a mut InterpCx<'tcx, Self>, + ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> { &mut ecx.machine.stack } @@ -714,7 +702,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } fn retag_ptr_value( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, _kind: mir::RetagKind, val: &ImmTy<'tcx, CtfeProvenance>, ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> { @@ -755,15 +743,22 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, Ok(()) } - fn before_alloc_read( - ecx: &InterpCx<'mir, 'tcx, Self>, - alloc_id: AllocId, - ) -> InterpResult<'tcx> { + fn before_alloc_read(ecx: &InterpCx<'tcx, Self>, alloc_id: AllocId) -> InterpResult<'tcx> { + // Check if this is the currently evaluated static. if Some(alloc_id) == ecx.machine.static_root_ids.map(|(id, _)| id) { - Err(ConstEvalErrKind::RecursiveStatic.into()) - } else { - Ok(()) + return Err(ConstEvalErrKind::RecursiveStatic.into()); } + // If this is another static, make sure we fire off the query to detect cycles. + // But only do that when checks for static recursion are enabled. + if ecx.machine.static_root_ids.is_some() { + if let Some(GlobalAlloc::Static(def_id)) = ecx.tcx.try_get_global_alloc(alloc_id) { + if ecx.tcx.is_foreign_item(def_id) { + throw_unsup!(ExternStatic(def_id)); + } + ecx.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?; + } + } + Ok(()) } } diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs index a5c8c0bb82a..4ae4816e33a 100644 --- a/compiler/rustc_const_eval/src/const_eval/mod.rs +++ b/compiler/rustc_const_eval/src/const_eval/mod.rs @@ -6,6 +6,7 @@ use rustc_middle::mir::interpret::InterpErrorInfo; use rustc_middle::query::{Key, TyCtxtAt}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_target::abi::VariantIdx; +use tracing::instrument; use crate::interpret::{format_interp_error, InterpCx}; diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index fbf2ca5ab0a..5312f1f946f 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -6,6 +6,7 @@ use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; use rustc_span::DUMMY_SP; use rustc_target::abi::{Abi, VariantIdx}; +use tracing::{debug, instrument, trace}; use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const}; use super::machine::CompileTimeEvalContext; @@ -20,7 +21,7 @@ use crate::interpret::{ #[instrument(skip(ecx), level = "debug")] fn branches<'tcx>( - ecx: &CompileTimeEvalContext<'tcx, 'tcx>, + ecx: &CompileTimeEvalContext<'tcx>, place: &MPlaceTy<'tcx>, n: usize, variant: Option<VariantIdx>, @@ -58,7 +59,7 @@ fn branches<'tcx>( #[instrument(skip(ecx), level = 
"debug")] fn slice_branches<'tcx>( - ecx: &CompileTimeEvalContext<'tcx, 'tcx>, + ecx: &CompileTimeEvalContext<'tcx>, place: &MPlaceTy<'tcx>, num_nodes: &mut usize, ) -> ValTreeCreationResult<'tcx> { @@ -76,7 +77,7 @@ fn slice_branches<'tcx>( #[instrument(skip(ecx), level = "debug")] fn const_to_valtree_inner<'tcx>( - ecx: &CompileTimeEvalContext<'tcx, 'tcx>, + ecx: &CompileTimeEvalContext<'tcx>, place: &MPlaceTy<'tcx>, num_nodes: &mut usize, ) -> ValTreeCreationResult<'tcx> { @@ -218,7 +219,7 @@ fn reconstruct_place_meta<'tcx>( #[instrument(skip(ecx), level = "debug", ret)] fn create_valtree_place<'tcx>( - ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, + ecx: &mut CompileTimeEvalContext<'tcx>, layout: TyAndLayout<'tcx>, valtree: ty::ValTree<'tcx>, ) -> MPlaceTy<'tcx> { @@ -363,7 +364,7 @@ pub fn valtree_to_const_value<'tcx>( /// Put a valtree into memory and return a reference to that. fn valtree_to_ref<'tcx>( - ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, + ecx: &mut CompileTimeEvalContext<'tcx>, valtree: ty::ValTree<'tcx>, pointee_ty: Ty<'tcx>, ) -> Immediate { @@ -379,7 +380,7 @@ fn valtree_to_ref<'tcx>( #[instrument(skip(ecx), level = "debug")] fn valtree_into_mplace<'tcx>( - ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>, + ecx: &mut CompileTimeEvalContext<'tcx>, place: &MPlaceTy<'tcx>, valtree: ty::ValTree<'tcx>, ) { @@ -456,6 +457,6 @@ fn valtree_into_mplace<'tcx>( } } -fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) { +fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx>, place: &MPlaceTy<'tcx>) { trace!("{:?}", ecx.dump_place(&PlaceTy::from(place.clone()))); } diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index 90d4f1168e4..e5ea4c3442e 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -1,5 +1,6 @@ use std::borrow::Cow; +use either::Either; use rustc_errors::{ codes::*, Diag, DiagArgValue, DiagCtxt, DiagMessage, Diagnostic, EmissionGuarantee, Level, }; @@ -481,6 +482,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> { DivisionOverflow => const_eval_division_overflow, RemainderOverflow => const_eval_remainder_overflow, PointerArithOverflow => const_eval_pointer_arithmetic_overflow, + ArithOverflow { .. } => const_eval_overflow_arith, + ShiftOverflow { .. 
} => const_eval_overflow_shift, InvalidMeta(InvalidMetaKind::SliceTooBig) => const_eval_invalid_meta_slice, InvalidMeta(InvalidMetaKind::TooBig) => const_eval_invalid_meta, UnterminatedCString(_) => const_eval_unterminated_c_string, @@ -539,6 +542,19 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> { | UninhabitedEnumVariantWritten(_) | UninhabitedEnumVariantRead(_) => {} + ArithOverflow { intrinsic } => { + diag.arg("intrinsic", intrinsic); + } + ShiftOverflow { intrinsic, shift_amount } => { + diag.arg("intrinsic", intrinsic); + diag.arg( + "shift_amount", + match shift_amount { + Either::Left(v) => v.to_string(), + Either::Right(v) => v.to_string(), + }, + ); + } BoundsCheckFailed { len, index } => { diag.arg("len", len); diag.arg("index", index); diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs index 799e12f9ac9..19414c72c6a 100644 --- a/compiler/rustc_const_eval/src/interpret/cast.rs +++ b/compiler/rustc_const_eval/src/interpret/cast.rs @@ -10,6 +10,7 @@ use rustc_middle::ty::{self, FloatTy, Ty}; use rustc_middle::{bug, span_bug}; use rustc_target::abi::Integer; use rustc_type_ir::TyKind::*; +use tracing::trace; use super::{ err_inval, throw_ub, throw_ub_custom, util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, @@ -18,7 +19,7 @@ use super::{ use crate::fluent_generated as fluent; -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { pub fn cast( &mut self, src: &OpTy<'tcx, M::Provenance>, @@ -206,7 +207,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert!(cast_to.ty.is_unsafe_ptr()); // Handle casting any ptr to raw ptr (might be a fat ptr). if cast_to.size == src.layout.size { - // Thin or fat pointer that just hast the ptr kind of target type changed. + // Thin or fat pointer that just has the ptr kind of target type changed. return Ok(ImmTy::from_immediate(**src, cast_to)); } else { // Casting the metadata away from a fat ptr. @@ -323,13 +324,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { use rustc_type_ir::TyKind::*; fn adjust_nan< - 'mir, - 'tcx: 'mir, - M: Machine<'mir, 'tcx>, + 'tcx, + M: Machine<'tcx>, F1: rustc_apfloat::Float + FloatConvert<F2>, F2: rustc_apfloat::Float, >( - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, f1: F1, f2: F2, ) -> F2 { diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index 8ddc741de23..224d17dbf52 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -6,12 +6,13 @@ use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt}; use rustc_middle::ty::{self, ScalarInt, Ty}; use rustc_target::abi::{self, TagEncoding}; use rustc_target::abi::{VariantIdx, Variants}; +use tracing::{instrument, trace}; use super::{ err_ub, throw_ub, ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable, }; -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Writes the discriminant of the given variant. /// /// If the variant is uninhabited, this is UB. 
@@ -172,7 +173,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let tag_val = ImmTy::from_uint(tag_bits, tag_layout); let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); let variant_index_relative_val = - self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; + self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?; let variant_index_relative = variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size); // Check if this is in the range that indicates an actual discriminant. @@ -292,11 +293,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let variant_index_relative_val = ImmTy::from_uint(variant_index_relative, tag_layout); let tag = self - .wrapping_binary_op( - mir::BinOp::Add, - &variant_index_relative_val, - &niche_start_val, - )? + .binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)? .to_scalar() .assert_int(); Ok(Some((tag, tag_field))) diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 344bb7cd98b..7c2100fcbe3 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -2,6 +2,7 @@ use std::cell::Cell; use std::{fmt, mem}; use either::{Either, Left, Right}; +use tracing::{debug, info, info_span, instrument, trace}; use hir::CRATE_HIR_ID; use rustc_errors::DiagCtxt; @@ -33,7 +34,7 @@ use crate::errors; use crate::util; use crate::{fluent_generated as fluent, ReportErrorExt}; -pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> { +pub struct InterpCx<'tcx, M: Machine<'tcx>> { /// Stores the `Machine` instance. /// /// Note: the stack is provided by the machine. @@ -48,7 +49,7 @@ pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> { pub(crate) param_env: ty::ParamEnv<'tcx>, /// The virtual memory system. - pub memory: Memory<'mir, 'tcx, M>, + pub memory: Memory<'tcx, M>, /// The recursion limit (cached from `tcx.recursion_limit(())`) pub recursion_limit: Limit, @@ -89,12 +90,12 @@ impl Drop for SpanGuard { } /// A stack frame. -pub struct Frame<'mir, 'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> { +pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information //////////////////////////////////////////////////////////////////////////////// /// The MIR for the function called on this frame. - pub body: &'mir mir::Body<'tcx>, + pub body: &'tcx mir::Body<'tcx>, /// The def_id and args of the current function. pub instance: ty::Instance<'tcx>, @@ -231,8 +232,8 @@ impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> { } } -impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> { - pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Prov, Extra> { +impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> { + pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> { Frame { body: self.body, instance: self.instance, @@ -246,7 +247,7 @@ impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> { } } -impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> { +impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> { /// Get the current location within the Frame. 
/// /// If this is `Left`, we are not currently executing any particular statement in @@ -344,16 +345,16 @@ impl<'tcx> FrameInfo<'tcx> { } } -impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> HasDataLayout for InterpCx<'tcx, M> { #[inline] fn data_layout(&self) -> &TargetDataLayout { &self.tcx.data_layout } } -impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M> +impl<'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'tcx, M> where - M: Machine<'mir, 'tcx>, + M: Machine<'tcx>, { #[inline] fn tcx(&self) -> TyCtxt<'tcx> { @@ -361,16 +362,16 @@ where } } -impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M> +impl<'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'tcx, M> where - M: Machine<'mir, 'tcx>, + M: Machine<'tcx>, { fn param_env(&self) -> ty::ParamEnv<'tcx> { self.param_env } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> { type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>; #[inline] @@ -390,7 +391,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpC } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> { type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>; fn handle_fn_abi_err( @@ -483,7 +484,7 @@ pub fn format_interp_error<'tcx>(dcx: &DiagCtxt, e: InterpErrorInfo<'tcx>) -> St s } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { pub fn new( tcx: TyCtxt<'tcx>, root_span: Span, @@ -516,14 +517,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } #[inline(always)] - pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] { + pub(crate) fn stack(&self) -> &[Frame<'tcx, M::Provenance, M::FrameExtra>] { M::stack(self) } #[inline(always)] - pub(crate) fn stack_mut( - &mut self, - ) -> &mut Vec<Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>> { + pub(crate) fn stack_mut(&mut self) -> &mut Vec<Frame<'tcx, M::Provenance, M::FrameExtra>> { M::stack_mut(self) } @@ -535,17 +534,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } #[inline(always)] - pub fn frame(&self) -> &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> { + pub fn frame(&self) -> &Frame<'tcx, M::Provenance, M::FrameExtra> { self.stack().last().expect("no call frames exist") } #[inline(always)] - pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> { + pub fn frame_mut(&mut self) -> &mut Frame<'tcx, M::Provenance, M::FrameExtra> { self.stack_mut().last_mut().expect("no call frames exist") } #[inline(always)] - pub fn body(&self) -> &'mir mir::Body<'tcx> { + pub fn body(&self) -> &'tcx mir::Body<'tcx> { self.frame().body } @@ -601,7 +600,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { T: TypeFoldable<TyCtxt<'tcx>>, >( &self, - frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>, + frame: &Frame<'tcx, M::Provenance, M::FrameExtra>, value: T, ) -> Result<T, ErrorHandled> { frame @@ -679,7 +678,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[inline(always)] pub(super) fn layout_of_local( &self, - frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>, + frame: &Frame<'tcx, 
M::Provenance, M::FrameExtra>, local: mir::Local, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, TyAndLayout<'tcx>> { @@ -802,7 +801,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn push_stack_frame( &mut self, instance: ty::Instance<'tcx>, - body: &'mir mir::Body<'tcx>, + body: &'tcx mir::Body<'tcx>, return_place: &MPlaceTy<'tcx, M::Provenance>, return_to_block: StackPopCleanup, ) -> InterpResult<'tcx> { @@ -820,7 +819,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { tracing_span: SpanGuard::new(), extra: (), }; - let frame = M::init_frame_extra(self, pre_frame)?; + let frame = M::init_frame(self, pre_frame)?; self.stack_mut().push(frame); // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check). @@ -1181,9 +1180,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| { let const_val = val.eval(*ecx.tcx, ecx.param_env, span).map_err(|err| { - if M::ALL_CONSTS_ARE_PRECHECKED && !matches!(err, ErrorHandled::TooGeneric(..)) { - // Looks like the const is not captured by `required_consts`, that's bad. - bug!("interpret const eval failure of {val:?} which is not in required_consts"); + if M::ALL_CONSTS_ARE_PRECHECKED { + match err { + ErrorHandled::TooGeneric(..) => {}, + ErrorHandled::Reported(reported, span) => { + if reported.is_tainted_by_errors() { + // const-eval will return "tainted" errors if e.g. the layout cannot + // be computed as the type references non-existing names. + // See <https://github.com/rust-lang/rust/issues/124348>. + } else { + // Looks like the const is not captured by `required_consts`, that's bad. + span_bug!(span, "interpret const eval failure of {val:?} which is not in required_consts"); + } + } + } } err.emit_note(*ecx.tcx); err @@ -1193,10 +1203,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } #[must_use] - pub fn dump_place( - &self, - place: &PlaceTy<'tcx, M::Provenance>, - ) -> PlacePrinter<'_, 'mir, 'tcx, M> { + pub fn dump_place(&self, place: &PlaceTy<'tcx, M::Provenance>) -> PlacePrinter<'_, 'tcx, M> { PlacePrinter { ecx: self, place: *place.place() } } @@ -1208,14 +1215,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[doc(hidden)] /// Helper struct for the `dump_place` function.
-pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { - ecx: &'a InterpCx<'mir, 'tcx, M>, +pub struct PlacePrinter<'a, 'tcx, M: Machine<'tcx>> { + ecx: &'a InterpCx<'tcx, M>, place: Place<M::Provenance>, } -impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug - for PlacePrinter<'a, 'mir, 'tcx, M> -{ +impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for PlacePrinter<'a, 'tcx, M> { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.place { Place::Local { local, offset, locals_addr } => { diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 3565b4fb516..8d0b267e1a9 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -23,13 +23,13 @@ use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::layout::TyAndLayout; use rustc_span::def_id::LocalDefId; use rustc_span::sym; +use tracing::{instrument, trace}; use super::{err_ub, AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy}; use crate::const_eval; use crate::errors::NestedStaticInThreadLocal; -pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine< - 'mir, +pub trait CompileTimeMachine<'tcx, T> = Machine< 'tcx, MemoryKind = T, Provenance = CtfeProvenance, @@ -45,7 +45,7 @@ pub trait HasStaticRootDefId { fn static_def_id(&self) -> Option<LocalDefId>; } -impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_, '_> { +impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_> { fn static_def_id(&self) -> Option<LocalDefId> { Some(self.static_root_ids?.1) } @@ -58,8 +58,8 @@ impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_, '_> { /// already mutable (as a sanity check). /// /// Returns an iterator over all relocations referred to by this allocation. -fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>( - ecx: &'rt mut InterpCx<'mir, 'tcx, M>, +fn intern_shallow<'rt, 'tcx, T, M: CompileTimeMachine<'tcx, T>>( + ecx: &'rt mut InterpCx<'tcx, M>, alloc_id: AllocId, mutability: Mutability, ) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, ()> { @@ -145,12 +145,8 @@ pub enum InternResult { /// /// For `InternKind::Static` the root allocation will not be interned, but must be handled by the caller. #[instrument(level = "debug", skip(ecx))] -pub fn intern_const_alloc_recursive< - 'mir, - 'tcx: 'mir, - M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>, ->( - ecx: &mut InterpCx<'mir, 'tcx, M>, +pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval::MemoryKind>>( + ecx: &mut InterpCx<'tcx, M>, intern_kind: InternKind, ret: &MPlaceTy<'tcx>, ) -> Result<(), InternResult> { @@ -289,13 +285,8 @@ pub fn intern_const_alloc_recursive< /// Intern `ret`. This function assumes that `ret` references no other allocation. 
#[instrument(level = "debug", skip(ecx))] -pub fn intern_const_alloc_for_constprop< - 'mir, - 'tcx: 'mir, - T, - M: CompileTimeMachine<'mir, 'tcx, T>, ->( - ecx: &mut InterpCx<'mir, 'tcx, M>, +pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>( + ecx: &mut InterpCx<'tcx, M>, alloc_id: AllocId, ) -> InterpResult<'tcx, ()> { if ecx.tcx.try_get_global_alloc(alloc_id).is_some() { @@ -314,19 +305,14 @@ pub fn intern_const_alloc_for_constprop< Ok(()) } -impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>> - InterpCx<'mir, 'tcx, M> -{ +impl<'tcx, M: super::intern::CompileTimeMachine<'tcx, !>> InterpCx<'tcx, M> { /// A helper function that allocates memory for the layout given and gives you access to mutate /// it. Once your own mutation code is done, the backing `Allocation` is removed from the /// current `Memory` and interned as read-only into the global memory. pub fn intern_with_temp_alloc( &mut self, layout: TyAndLayout<'tcx>, - f: impl FnOnce( - &mut InterpCx<'mir, 'tcx, M>, - &PlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, ()>, + f: impl FnOnce(&mut InterpCx<'tcx, M>, &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, ()>, ) -> InterpResult<'tcx, AllocId> { // `allocate` picks a fresh AllocId that we will associate with its data below. let dest = self.allocate(layout, MemoryKind::Stack)?; diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index dce4d56f7e0..18b76443cd9 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -14,6 +14,7 @@ use rustc_middle::{ }; use rustc_span::symbol::{sym, Symbol}; use rustc_target::abi::Size; +use tracing::trace; use super::{ err_inval, err_ub_custom, err_unsup_format, memory::MemoryKind, throw_inval, throw_ub_custom, @@ -97,7 +98,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>( }) } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Returns `true` if emulation happened. /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own /// intrinsic handling. @@ -255,6 +256,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { name = intrinsic_name, ); } + // This will always return 0. (a, b) } (Err(_), _) | (_, Err(_)) => { @@ -285,9 +287,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let (val, overflowed) = { let a_offset = ImmTy::from_uint(a_offset, usize_layout); let b_offset = ImmTy::from_uint(b_offset, usize_layout); - self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)? + self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)? + .to_scalar_pair() }; - if overflowed { + if overflowed.to_bool()? { // a < b if intrinsic_name == sym::ptr_offset_from_unsigned { throw_ub_custom!( @@ -299,7 +302,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // The signed form of the intrinsic allows this. If we interpret the // difference as isize, we'll get the proper signed difference. If that // seems *positive*, they were more than isize::MAX apart. 
- let dist = val.to_scalar().to_target_isize(self)?; + let dist = val.to_target_isize(self)?; if dist >= 0 { throw_ub_custom!( fluent::const_eval_offset_from_underflow, @@ -309,7 +312,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { dist } else { // b >= a - let dist = val.to_scalar().to_target_isize(self)?; + let dist = val.to_target_isize(self)?; // If converting to isize produced a *negative* result, we had an overflow // because they were more than isize::MAX apart. if dist < 0 { @@ -515,9 +518,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Performs an exact division, resulting in undefined behavior where // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`. // First, check x % y != 0 (or if that computation overflows). - let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, a, b)?; - assert!(!overflow); // All overflow is UB, so this should never return on overflow. - if res.to_scalar().assert_bits(a.layout.size) != 0 { + let rem = self.binary_op(BinOp::Rem, a, b)?; + if rem.to_scalar().assert_bits(a.layout.size) != 0 { throw_ub_custom!( fluent::const_eval_exact_div_has_remainder, a = format!("{a}"), @@ -525,7 +527,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) } // `Rem` says this is all right, so we can let `Div` do its job. - self.binop_ignore_overflow(BinOp::Div, a, b, &dest.clone().into()) + let res = self.binary_op(BinOp::Div, a, b)?; + self.write_immediate(*res, dest) } pub fn saturating_arith( @@ -538,8 +541,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert!(matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..))); assert!(matches!(mir_op, BinOp::Add | BinOp::Sub)); - let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?; - Ok(if overflowed { + let (val, overflowed) = + self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair(); + Ok(if overflowed.to_bool()? { let size = l.layout.size; let num_bits = size.bits(); if l.layout.abi.is_signed() { @@ -570,7 +574,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } } else { - val.to_scalar() + val }) } @@ -601,9 +605,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`. pub(crate) fn copy_intrinsic( &mut self, - src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, + src: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, nonoverlapping: bool, ) -> InterpResult<'tcx> { let count = self.read_target_usize(count)?; @@ -630,8 +634,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Does a *typed* swap of `*left` and `*right`. 
fn typed_swap_intrinsic( &mut self, - left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, + left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, ) -> InterpResult<'tcx> { let left = self.deref_pointer(left)?; let right = self.deref_pointer(right)?; @@ -647,9 +651,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub(crate) fn write_bytes_intrinsic( &mut self, - dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, + dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, ) -> InterpResult<'tcx> { let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?; @@ -669,9 +673,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub(crate) fn compare_bytes_intrinsic( &mut self, - left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - byte_count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, + left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + byte_count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, ) -> InterpResult<'tcx, Scalar<M::Provenance>> { let left = self.read_pointer(left)?; let right = self.read_pointer(right)?; @@ -687,14 +691,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub(crate) fn raw_eq_intrinsic( &mut self, - lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, - rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, + lhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, + rhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, ) -> InterpResult<'tcx, Scalar<M::Provenance>> { let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap())?; assert!(layout.is_sized()); - let get_bytes = |this: &InterpCx<'mir, 'tcx, M>, - op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>, + let get_bytes = |this: &InterpCx<'tcx, M>, + op: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>, size| -> InterpResult<'tcx, &[u8]> { let ptr = this.read_pointer(op)?; diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs index 2eaebc1924b..4ae0aca5a0c 100644 --- a/compiler/rustc_const_eval/src/interpret/machine.rs +++ b/compiler/rustc_const_eval/src/interpret/machine.rs @@ -94,7 +94,7 @@ pub trait AllocMap<K: Hash + Eq, V> { /// Methods of this trait signifies a point where CTFE evaluation would fail /// and some use case dependent behaviour can instead be applied. -pub trait Machine<'mir, 'tcx: 'mir>: Sized { +pub trait Machine<'tcx>: Sized { /// Additional memory kinds a machine wishes to distinguish from the builtin ones type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static; @@ -145,12 +145,12 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { const ALL_CONSTS_ARE_PRECHECKED: bool = true; /// Whether memory accesses should be alignment-checked. - fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool; + fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool; /// Gives the machine a chance to detect more misalignment than the built-in checks would catch. 
#[inline(always)] fn alignment_check( - _ecx: &InterpCx<'mir, 'tcx, Self>, + _ecx: &InterpCx<'tcx, Self>, _alloc_id: AllocId, _alloc_align: Align, _alloc_kind: AllocKind, @@ -161,22 +161,22 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { } /// Whether to enforce the validity invariant for a specific layout. - fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool; + fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool; /// Whether function calls should be [ABI](CallAbi)-checked. - fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool { + fn enforce_abi(_ecx: &InterpCx<'tcx, Self>) -> bool { true } /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually /// check for overflow. - fn ignore_optional_overflow_checks(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool; + fn ignore_optional_overflow_checks(_ecx: &InterpCx<'tcx, Self>) -> bool; /// Entry point for obtaining the MIR of anything that should get evaluated. /// So not just functions and shims, but also const/static initializers, anonymous /// constants, ... fn load_mir( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, instance: ty::InstanceDef<'tcx>, ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> { Ok(ecx.tcx.instance_mir(instance)) @@ -193,19 +193,19 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Passing `dest`and `ret` in the same `Option` proved very annoying when only one of them /// was used. fn find_mir_or_eval_fn( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, instance: ty::Instance<'tcx>, abi: CallAbi, args: &[FnArg<'tcx, Self::Provenance>], destination: &MPlaceTy<'tcx, Self::Provenance>, target: Option<mir::BasicBlock>, unwind: mir::UnwindAction, - ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>; + ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>>; /// Execute `fn_val`. It is the hook's responsibility to advance the instruction /// pointer as appropriate. fn call_extra_fn( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, fn_val: Self::ExtraFnVal, abi: CallAbi, args: &[FnArg<'tcx, Self::Provenance>], @@ -220,7 +220,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Returns `None` if the intrinsic was fully handled. /// Otherwise, returns an `Instance` of the function that implements the intrinsic. fn call_intrinsic( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx, Self::Provenance>], destination: &MPlaceTy<'tcx, Self::Provenance>, @@ -230,17 +230,17 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Called to evaluate `Assert` MIR terminators that trigger a panic. fn assert_panic( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, msg: &mir::AssertMessage<'tcx>, unwind: mir::UnwindAction, ) -> InterpResult<'tcx>; /// Called to trigger a non-unwinding panic. - fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx>; + fn panic_nounwind(_ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx>; /// Called when unwinding reached a state where execution should be terminated. 
fn unwind_terminate( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, reason: mir::UnwindTerminateReason, ) -> InterpResult<'tcx>; @@ -248,16 +248,16 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// /// Returns a (value, overflowed) pair if the operation succeeded fn binary_ptr_op( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, bin_op: mir::BinOp, left: &ImmTy<'tcx, Self::Provenance>, right: &ImmTy<'tcx, Self::Provenance>, - ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>; + ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>>; /// Generate the NaN returned by a float operation, given the list of inputs. /// (This is all inputs, not just NaN inputs!) fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>( - _ecx: &InterpCx<'mir, 'tcx, Self>, + _ecx: &InterpCx<'tcx, Self>, _inputs: &[F1], ) -> F2 { // By default we always return the preferred NaN. @@ -266,14 +266,14 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Called before a basic block terminator is executed. #[inline] - fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { + fn before_terminator(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> { Ok(()) } /// Called when the interpreter encounters a `StatementKind::ConstEvalCounter` instruction. /// You can use this to detect long or endlessly running programs. #[inline] - fn increment_const_eval_counter(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { + fn increment_const_eval_counter(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> { Ok(()) } @@ -293,7 +293,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Return the `AllocId` for the given thread-local static in the current thread. fn thread_local_static_pointer( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, def_id: DefId, ) -> InterpResult<'tcx, Pointer<Self::Provenance>> { throw_unsup!(ThreadLocalStatic(def_id)) @@ -301,20 +301,20 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Return the `AllocId` for the given `extern static`. fn extern_static_pointer( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, def_id: DefId, ) -> InterpResult<'tcx, Pointer<Self::Provenance>>; /// "Int-to-pointer cast" fn ptr_from_addr_cast( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, addr: u64, ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>; /// Marks a pointer as exposed, allowing its provenance /// to be recovered. "Pointer-to-int cast" fn expose_ptr( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, ptr: Pointer<Self::Provenance>, ) -> InterpResult<'tcx>; @@ -325,31 +325,45 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// /// When this fails, that means the pointer does not point to a live allocation. fn ptr_get_alloc( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, ptr: Pointer<Self::Provenance>, ) -> Option<(AllocId, Size, Self::ProvenanceExtra)>; - /// Called to adjust allocations to the Provenance and AllocExtra of this machine. + /// Called to adjust global allocations to the Provenance and AllocExtra of this machine. /// /// If `alloc` contains pointers, then they are all pointing to globals. /// - /// The way we construct allocations is to always first construct it without extra and then add - /// the extra. This keeps uniform code paths for handling both allocations created by CTFE for - /// globals, and allocations created by Miri during evaluation.
- /// - /// `kind` is the kind of the allocation being adjusted; it can be `None` when - /// it's a global and `GLOBAL_KIND` is `None`. - /// /// This should avoid copying if no work has to be done! If this returns an owned /// allocation (because a copy had to be done to adjust things), machine memory will /// cache the result. (This relies on `AllocMap::get_or` being able to add the /// owned allocation to the map even when the map is shared.) - fn adjust_allocation<'b>( - ecx: &InterpCx<'mir, 'tcx, Self>, + fn adjust_global_allocation<'b>( + ecx: &InterpCx<'tcx, Self>, id: AllocId, - alloc: Cow<'b, Allocation>, - kind: Option<MemoryKind<Self::MemoryKind>>, - ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>; + alloc: &'b Allocation, + ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>> + { + // The default implementation does a copy; CTFE machines have a more efficient implementation + // based on their particular choice for `Provenance`, `AllocExtra`, and `Bytes`. + let kind = Self::GLOBAL_KIND + .expect("if GLOBAL_KIND is None, adjust_global_allocation must be overwritten"); + let alloc = alloc.adjust_from_tcx(&ecx.tcx, |ptr| ecx.global_root_pointer(ptr))?; + let extra = + Self::init_alloc_extra(ecx, id, MemoryKind::Machine(kind), alloc.size(), alloc.align)?; + Ok(Cow::Owned(alloc.with_extra(extra))) + } + + /// Initialize the extra state of an allocation. + /// + /// This is guaranteed to be called exactly once on all allocations that are accessed by the + /// program. + fn init_alloc_extra( + ecx: &InterpCx<'tcx, Self>, + id: AllocId, + kind: MemoryKind<Self::MemoryKind>, + size: Size, + align: Align, + ) -> InterpResult<'tcx, Self::AllocExtra>; /// Return a "root" pointer for the given allocation: the one that is used for direct /// accesses to this static/const/fn allocation, or the one returned from the heap allocator. @@ -359,7 +373,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// `kind` is the kind of the allocation the pointer points to; it can be `None` when /// it's a global and `GLOBAL_KIND` is `None`. fn adjust_alloc_root_pointer( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, ptr: Pointer, kind: Option<MemoryKind<Self::MemoryKind>>, ) -> InterpResult<'tcx, Pointer<Self::Provenance>>; @@ -370,7 +384,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// is triggered, `targets[0]` when the assembly falls through, or diverge in case of /// `InlineAsmOptions::NORETURN` being set. fn eval_inline_asm( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _template: &'tcx [InlineAsmTemplatePiece], _operands: &[mir::InlineAsmOperand<'tcx>], _options: InlineAsmOptions, @@ -406,10 +420,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// /// Used to prevent statics from self-initializing by reading from their own memory /// as it is being initialized. - fn before_alloc_read( - _ecx: &InterpCx<'mir, 'tcx, Self>, - _alloc_id: AllocId, - ) -> InterpResult<'tcx> { + fn before_alloc_read(_ecx: &InterpCx<'tcx, Self>, _alloc_id: AllocId) -> InterpResult<'tcx> { Ok(()) } @@ -444,7 +455,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Returns the possibly adjusted pointer. 
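The change above splits the old dual-purpose `adjust_allocation` hook in two: adjusting a global allocation (which may require producing an owned, provenance-rewritten copy) and initializing the machine's per-allocation extra state (which never requires a copy). A simplified sketch of the resulting shape, with `Allocation` and a plain `usize` size standing in for the real rustc types:

    #[derive(Clone)]
    struct Allocation<Extra = ()> {
        bytes: Vec<u8>,
        extra: Extra,
    }

    trait Machine: Sized {
        type AllocExtra;

        /// Initialize per-allocation machine state; called exactly once per
        /// allocation, mirroring `init_alloc_extra` above.
        fn init_alloc_extra(size: usize) -> Self::AllocExtra;

        /// Adjust a *global* allocation: copy it and attach the extra state.
        /// A machine-local allocation never takes this path; it only needs
        /// the `init_alloc_extra` half, which is what lets the interpreter
        /// skip the copy for freshly created allocations.
        fn adjust_global_allocation(alloc: &Allocation) -> Allocation<Self::AllocExtra> {
            Allocation {
                bytes: alloc.bytes.clone(),
                extra: Self::init_alloc_extra(alloc.bytes.len()),
            }
        }
    }

    // A CTFE-like machine has no extra state at all.
    struct CtfeLike;
    impl Machine for CtfeLike {
        type AllocExtra = ();
        fn init_alloc_extra(_size: usize) {}
    }

    fn main() {}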
#[inline] fn retag_ptr_value( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _kind: mir::RetagKind, val: &ImmTy<'tcx, Self::Provenance>, ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> { @@ -455,7 +466,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// Replaces all pointers stored in the given place. #[inline] fn retag_place_contents( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _kind: mir::RetagKind, _place: &PlaceTy<'tcx, Self::Provenance>, ) -> InterpResult<'tcx> { @@ -467,7 +478,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// These places need to be protected to make sure the program cannot tell whether the /// argument/return value was actually copied or passed in-place. fn protect_in_place_function_argument( - ecx: &mut InterpCx<'mir, 'tcx, Self>, + ecx: &mut InterpCx<'tcx, Self>, mplace: &MPlaceTy<'tcx, Self::Provenance>, ) -> InterpResult<'tcx> { // Without an aliasing model, all we can do is put `Uninit` into the place. @@ -476,30 +487,30 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { } /// Called immediately before a new stack frame gets pushed. - fn init_frame_extra( - ecx: &mut InterpCx<'mir, 'tcx, Self>, - frame: Frame<'mir, 'tcx, Self::Provenance>, - ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>; + fn init_frame( + ecx: &mut InterpCx<'tcx, Self>, + frame: Frame<'tcx, Self::Provenance>, + ) -> InterpResult<'tcx, Frame<'tcx, Self::Provenance, Self::FrameExtra>>; /// Borrow the current thread's stack. fn stack<'a>( - ecx: &'a InterpCx<'mir, 'tcx, Self>, - ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>]; + ecx: &'a InterpCx<'tcx, Self>, + ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>]; /// Mutably borrow the current thread's stack. fn stack_mut<'a>( - ecx: &'a mut InterpCx<'mir, 'tcx, Self>, - ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>; + ecx: &'a mut InterpCx<'tcx, Self>, + ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>>; /// Called immediately after a stack frame got pushed and its locals got initialized. - fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { + fn after_stack_push(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> { Ok(()) } /// Called just before the return value is copied to the caller-provided return place. fn before_stack_pop( - _ecx: &InterpCx<'mir, 'tcx, Self>, - _frame: &Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>, + _ecx: &InterpCx<'tcx, Self>, + _frame: &Frame<'tcx, Self::Provenance, Self::FrameExtra>, ) -> InterpResult<'tcx> { Ok(()) } @@ -508,8 +519,8 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// The `locals` have already been destroyed! #[inline(always)] fn after_stack_pop( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, - _frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>, + _ecx: &mut InterpCx<'tcx, Self>, + _frame: Frame<'tcx, Self::Provenance, Self::FrameExtra>, unwinding: bool, ) -> InterpResult<'tcx, StackPopJump> { // By default, we do not support unwinding from panics @@ -521,7 +532,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// but before the local's stack frame is updated to point to that memory.
#[inline(always)] fn after_local_allocated( - _ecx: &mut InterpCx<'mir, 'tcx, Self>, + _ecx: &mut InterpCx<'tcx, Self>, _local: mir::Local, _mplace: &MPlaceTy<'tcx, Self::Provenance>, ) -> InterpResult<'tcx> { @@ -532,7 +543,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// but this hook has the chance to do some pre/postprocessing. #[inline(always)] fn eval_mir_constant<F>( - ecx: &InterpCx<'mir, 'tcx, Self>, + ecx: &InterpCx<'tcx, Self>, val: mir::Const<'tcx>, span: Span, layout: Option<TyAndLayout<'tcx>>, @@ -540,7 +551,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>> where F: Fn( - &InterpCx<'mir, 'tcx, Self>, + &InterpCx<'tcx, Self>, mir::Const<'tcx>, Span, Option<TyAndLayout<'tcx>>, @@ -552,7 +563,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized { /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines /// (CTFE and ConstProp) use the same instance. Here, we share that code. -pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { +pub macro compile_time_machine(<$tcx: lifetime>) { type Provenance = CtfeProvenance; type ProvenanceExtra = bool; // the "immutable" flag @@ -567,13 +578,13 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { type Bytes = Box<[u8]>; #[inline(always)] - fn ignore_optional_overflow_checks(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool { + fn ignore_optional_overflow_checks(_ecx: &InterpCx<$tcx, Self>) -> bool { false } #[inline(always)] fn unwind_terminate( - _ecx: &mut InterpCx<$mir, $tcx, Self>, + _ecx: &mut InterpCx<$tcx, Self>, _reason: mir::UnwindTerminateReason, ) -> InterpResult<$tcx> { unreachable!("unwinding cannot happen during compile-time evaluation") @@ -581,7 +592,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { #[inline(always)] fn call_extra_fn( - _ecx: &mut InterpCx<$mir, $tcx, Self>, + _ecx: &mut InterpCx<$tcx, Self>, fn_val: !, _abi: CallAbi, _args: &[FnArg<$tcx>], @@ -593,17 +604,27 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { } #[inline(always)] - fn adjust_allocation<'b>( - _ecx: &InterpCx<$mir, $tcx, Self>, + fn adjust_global_allocation<'b>( + _ecx: &InterpCx<$tcx, Self>, _id: AllocId, - alloc: Cow<'b, Allocation>, - _kind: Option<MemoryKind<Self::MemoryKind>>, + alloc: &'b Allocation, ) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> { - Ok(alloc) + // Overwrite default implementation: no need to adjust anything. + Ok(Cow::Borrowed(alloc)) + } + + fn init_alloc_extra( + _ecx: &InterpCx<$tcx, Self>, + _id: AllocId, + _kind: MemoryKind<Self::MemoryKind>, + _size: Size, + _align: Align, + ) -> InterpResult<$tcx, Self::AllocExtra> { + Ok(()) } fn extern_static_pointer( - ecx: &InterpCx<$mir, $tcx, Self>, + ecx: &InterpCx<$tcx, Self>, def_id: DefId, ) -> InterpResult<$tcx, Pointer> { // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail. 
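The CTFE override just above returns `Cow::Borrowed` because a compile-time machine can use the `tcx` allocation unmodified, whereas a machine like Miri must return an owned, adjusted copy. A small illustration of that `Cow` discipline in isolation; the function and flag are invented for the example:

    use std::borrow::Cow;

    fn adjust(data: &[u8], needs_change: bool) -> Cow<'_, [u8]> {
        if needs_change {
            // Miri-like case: provenance/extra must be rewritten, so own a copy.
            Cow::Owned(data.to_vec())
        } else {
            // CTFE-like case: nothing to adjust, so no copy is made.
            Cow::Borrowed(data)
        }
    }

    fn main() {
        let global = [1u8, 2, 3];
        assert!(matches!(adjust(&global, false), Cow::Borrowed(_)));
        assert!(matches!(adjust(&global, true), Cow::Owned(_)));
    }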
@@ -612,7 +633,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { #[inline(always)] fn adjust_alloc_root_pointer( - _ecx: &InterpCx<$mir, $tcx, Self>, + _ecx: &InterpCx<$tcx, Self>, ptr: Pointer<CtfeProvenance>, _kind: Option<MemoryKind<Self::MemoryKind>>, ) -> InterpResult<$tcx, Pointer<CtfeProvenance>> { @@ -621,7 +642,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { #[inline(always)] fn ptr_from_addr_cast( - _ecx: &InterpCx<$mir, $tcx, Self>, + _ecx: &InterpCx<$tcx, Self>, addr: u64, ) -> InterpResult<$tcx, Pointer<Option<CtfeProvenance>>> { // Allow these casts, but make the pointer not dereferenceable. @@ -632,7 +653,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { #[inline(always)] fn ptr_get_alloc( - _ecx: &InterpCx<$mir, $tcx, Self>, + _ecx: &InterpCx<$tcx, Self>, ptr: Pointer<CtfeProvenance>, ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> { // We know `offset` is relative to the allocation, so we can use `into_parts`. diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 350fd480fba..521f28b7123 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -21,6 +21,8 @@ use rustc_middle::mir::display_allocation; use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt}; use rustc_target::abi::{Align, HasDataLayout, Size}; +use tracing::{debug, instrument, trace}; + use crate::fluent_generated as fluent; use super::{ @@ -94,7 +96,7 @@ impl<'tcx, Other> FnVal<'tcx, Other> { // `Memory` has to depend on the `Machine` because some of its operations // (e.g., `get`) call a `Machine` hook. -pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> { +pub struct Memory<'tcx, M: Machine<'tcx>> { /// Allocations local to this instance of the interpreter. The kind /// helps ensure that the same mechanism is used for allocation and /// deallocation. When an allocation is not found here, it is a @@ -140,7 +142,7 @@ pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Bo alloc_id: AllocId, } -impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> { pub fn new() -> Self { Memory { alloc_map: M::MemoryMap::default(), @@ -156,7 +158,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Call this to turn untagged "global" pointers (obtained via `tcx`) into /// the machine pointer to the allocation. Must never be used /// for any other pointers, nor for TLS statics. @@ -237,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn allocate_raw_ptr( &mut self, - alloc: Allocation, + alloc: Allocation<M::Provenance, (), M::Bytes>, kind: MemoryKind<M::MemoryKind>, ) -> InterpResult<'tcx, Pointer<M::Provenance>> { let id = self.tcx.reserve_alloc_id(); @@ -246,8 +248,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { M::GLOBAL_KIND.map(MemoryKind::Machine), "dynamically allocating global memory" ); - let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?; - self.memory.alloc_map.insert(id, (kind, alloc.into_owned())); + // We have set things up so we don't need to call `adjust_from_tcx` here, + // so we avoid copying the entire allocation contents. 
+ let extra = M::init_alloc_extra(self, id, kind, alloc.size(), alloc.align)?; + let alloc = alloc.with_extra(extra); + self.memory.alloc_map.insert(id, (kind, alloc)); M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind)) } @@ -413,6 +418,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// to the allocation it points to. Supports both shared and mutable references, as the actual /// checking is offloaded to a helper closure. /// + /// `alloc_size` will only get called for non-zero-sized accesses. + /// /// Returns `None` if and only if the size is 0. fn check_and_deref_ptr<T>( &self, @@ -425,18 +432,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { M::ProvenanceExtra, ) -> InterpResult<'tcx, (Size, Align, T)>, ) -> InterpResult<'tcx, Option<T>> { + // Everything is okay with size 0. + if size.bytes() == 0 { + return Ok(None); + } + Ok(match self.ptr_try_get_alloc_id(ptr) { Err(addr) => { - // We couldn't get a proper allocation. This is only okay if the access size is 0, - // and the address is not null. - if size.bytes() > 0 || addr == 0 { - throw_ub!(DanglingIntPointer(addr, msg)); - } - None + // We couldn't get a proper allocation. + throw_ub!(DanglingIntPointer(addr, msg)); } Ok((alloc_id, offset, prov)) => { let (alloc_size, _alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?; - // Test bounds. This also ensures non-null. + // Test bounds. // It is sufficient to check this for the end pointer. Also check for overflow! if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) { throw_ub!(PointerOutOfBounds { @@ -447,14 +455,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { msg, }) } - // Ensure we never consider the null pointer dereferenceable. - if M::Provenance::OFFSET_IS_ADDR { - assert_ne!(ptr.addr(), Size::ZERO); - } - // We can still be zero-sized in this branch, in which case we have to - // return `None`. - if size.bytes() == 0 { None } else { Some(ret_val) } + Some(ret_val) } }) } @@ -525,7 +527,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map. pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) { // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or @@ -537,7 +539,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } /// Allocation accessors -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Helper function to obtain a global (tcx) allocation. /// This attempts to return a reference to an existing allocation if /// one can be found in `tcx`. That, however, is only possible if `tcx` and @@ -584,11 +586,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?; // We got tcx memory. Let the machine initialize its "extra" stuff. - M::adjust_allocation( + M::adjust_global_allocation( self, id, // always use the ID we got as input, not the "hidden" one. 
- Cow::Borrowed(alloc.inner()), - M::GLOBAL_KIND.map(MemoryKind::Machine), + alloc.inner(), ) } @@ -641,16 +642,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { size, CheckInAllocMsg::MemoryAccessTest, |alloc_id, offset, prov| { - if !self.memory.validation_in_progress.get() { - // We want to call the hook on *all* accesses that involve an AllocId, - // including zero-sized accesses. That means we have to do it here - // rather than below in the `Some` branch. - M::before_alloc_read(self, alloc_id)?; - } let alloc = self.get_alloc_raw(alloc_id)?; Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc))) }, )?; + // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized + // accesses. That means we cannot rely on the closure above or the `Some` branch below. We + // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked. + if !self.memory.validation_in_progress.get() { + if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr) { + M::before_alloc_read(self, alloc_id)?; + } + } if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc { let range = alloc_range(offset, size); @@ -887,14 +890,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Create a lazy debug printer that prints the given allocation and all allocations it points /// to, recursively. #[must_use] - pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> { + pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> { self.dump_allocs(vec![id]) } /// Create a lazy debug printer for a list of allocations and all allocations they point to, /// recursively. #[must_use] - pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> { + pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> { allocs.sort(); allocs.dedup(); DumpAllocs { ecx: self, allocs } @@ -974,12 +977,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { #[doc(hidden)] /// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods. -pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { - ecx: &'a InterpCx<'mir, 'tcx, M>, +pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> { + ecx: &'a InterpCx<'tcx, M>, allocs: Vec<AllocId>, } -impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> { +impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Cannot be a closure because it is generic in `Prov`, `Extra`. fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( @@ -1124,7 +1127,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Reads the given number of bytes from memory, and strips their provenance if possible. /// Returns them as a slice. /// @@ -1337,7 +1340,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } /// Machine pointer introspection. -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Test if this value might be null. /// If the machine does not support ptr-to-int casts, this is conservative. 
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> { diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index bad9732f483..bbb2c2f3938 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -4,10 +4,11 @@ use std::assert_matches::assert_matches; use either::{Either, Left, Right}; +use tracing::trace; use rustc_hir::def::Namespace; use rustc_middle::mir::interpret::ScalarSizeMismatch; -use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout}; use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter}; use rustc_middle::ty::{ConstInt, ScalarInt, Ty, TyCtxt}; use rustc_middle::{bug, span_bug}; @@ -249,6 +250,15 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { Self::from_scalar(Scalar::from_i8(c as i8), layout) } + pub fn from_pair(a: Self, b: Self, tcx: TyCtxt<'tcx>) -> Self { + let layout = tcx + .layout_of( + ty::ParamEnv::reveal_all().and(Ty::new_tup(tcx, &[a.layout.ty, b.layout.ty])), + ) + .unwrap(); + Self::from_scalar_pair(a.to_scalar(), b.to_scalar(), layout) + } + /// Return the immediate as a `ScalarInt`. Ensures that it has the size that the layout of the /// immediate indicates. #[inline] @@ -270,6 +280,17 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral()) } + #[inline] + #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) + pub fn to_pair(self, cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>)) -> (Self, Self) { + let layout = self.layout; + let (val0, val1) = self.to_scalar_pair(); + ( + ImmTy::from_scalar(val0, layout.field(cx, 0)), + ImmTy::from_scalar(val1, layout.field(cx, 1)), + ) + } + /// Compute the "sub-immediate" that is located within the `base` at the given offset with the /// given layout. // Not called `offset` to avoid confusion with the trait method. 
@@ -353,21 +374,21 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> { MemPlaceMeta::None } - fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>( &self, offset: Size, _mode: OffsetMode, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway Ok(self.offset_(offset, layout, ecx)) } - fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn to_op<M: Machine<'tcx, Provenance = Prov>>( &self, - _ecx: &InterpCx<'mir, 'tcx, M>, + _ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { Ok(self.clone().into()) } @@ -436,13 +457,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> { } } - fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>( &self, offset: Size, mode: OffsetMode, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { match self.as_mplace_or_imm() { Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()), @@ -454,9 +475,9 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> { } } - fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn to_op<M: Machine<'tcx, Provenance = Prov>>( &self, - _ecx: &InterpCx<'mir, 'tcx, M>, + _ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { Ok(self.clone()) } @@ -488,7 +509,7 @@ impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> { } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`. /// Returns `None` if the layout does not permit loading this as a value. /// diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index 5f59e3d887e..6d005dfcd86 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -1,78 +1,23 @@ +use either::Either; + use rustc_apfloat::{Float, FloatConvert}; use rustc_middle::mir; use rustc_middle::mir::interpret::{InterpResult, Scalar}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; -use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty}; +use rustc_middle::ty::{self, FloatTy, ScalarInt}; use rustc_middle::{bug, span_bug}; use rustc_span::symbol::sym; -use rustc_target::abi::Abi; - -use super::{err_ub, throw_ub, throw_ub_custom, ImmTy, Immediate, InterpCx, Machine, PlaceTy}; +use tracing::trace; -use crate::fluent_generated as fluent; - -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { - /// Applies the binary operation `op` to the two operands and writes a tuple of the result - /// and a boolean signifying the potential overflow to the destination. 
- pub fn binop_with_overflow( - &mut self, - op: mir::BinOp, - left: &ImmTy<'tcx, M::Provenance>, - right: &ImmTy<'tcx, M::Provenance>, - dest: &PlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx> { - let (val, overflowed) = self.overflowing_binary_op(op, left, right)?; - debug_assert_eq!( - Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]), - dest.layout.ty, - "type mismatch for result of {op:?}", - ); - // Write the result to `dest`. - if let Abi::ScalarPair(..) = dest.layout.abi { - // We can use the optimized path and avoid `place_field` (which might do - // `force_allocation`). - let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed)); - self.write_immediate(pair, dest)?; - } else { - assert!(self.tcx.sess.opts.unstable_opts.randomize_layout); - // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to - // do a component-wise write here. This code path is slower than the above because - // `place_field` will have to `force_allocate` locals here. - let val_field = self.project_field(dest, 0)?; - self.write_scalar(val.to_scalar(), &val_field)?; - let overflowed_field = self.project_field(dest, 1)?; - self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?; - } - Ok(()) - } +use super::{err_ub, throw_ub, ImmTy, InterpCx, Machine, MemPlaceMeta}; - /// Applies the binary operation `op` to the arguments and writes the result to the - /// destination. - pub fn binop_ignore_overflow( - &mut self, - op: mir::BinOp, - left: &ImmTy<'tcx, M::Provenance>, - right: &ImmTy<'tcx, M::Provenance>, - dest: &PlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx> { - let val = self.wrapping_binary_op(op, left, right)?; - assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}"); - self.write_immediate(*val, dest) - } -} - -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { - fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> (ImmTy<'tcx, M::Provenance>, bool) { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { + fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> { let res = Ord::cmp(&lhs, &rhs); - return (ImmTy::from_ordering(res, *self.tcx), false); + return ImmTy::from_ordering(res, *self.tcx); } - fn binary_char_op( - &self, - bin_op: mir::BinOp, - l: char, - r: char, - ) -> (ImmTy<'tcx, M::Provenance>, bool) { + fn binary_char_op(&self, bin_op: mir::BinOp, l: char, r: char) -> ImmTy<'tcx, M::Provenance> { use rustc_middle::mir::BinOp::*; if bin_op == Cmp { @@ -88,15 +33,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Ge => l >= r, _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op), }; - (ImmTy::from_bool(res, *self.tcx), false) + ImmTy::from_bool(res, *self.tcx) } - fn binary_bool_op( - &self, - bin_op: mir::BinOp, - l: bool, - r: bool, - ) -> (ImmTy<'tcx, M::Provenance>, bool) { + fn binary_bool_op(&self, bin_op: mir::BinOp, l: bool, r: bool) -> ImmTy<'tcx, M::Provenance> { use rustc_middle::mir::BinOp::*; let res = match bin_op { @@ -111,7 +51,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { BitXor => l ^ r, _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op), }; - (ImmTy::from_bool(res, *self.tcx), false) + ImmTy::from_bool(res, *self.tcx) } fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>( @@ -120,14 +60,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { layout: 
TyAndLayout<'tcx>, l: F, r: F, - ) -> (ImmTy<'tcx, M::Provenance>, bool) { + ) -> ImmTy<'tcx, M::Provenance> { use rustc_middle::mir::BinOp::*; // Performs appropriate non-deterministic adjustments of NaN results. let adjust_nan = |f: F| -> F { if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f } }; - let val = match bin_op { + match bin_op { Eq => ImmTy::from_bool(l == r, *self.tcx), Ne => ImmTy::from_bool(l != r, *self.tcx), Lt => ImmTy::from_bool(l < r, *self.tcx), @@ -140,8 +80,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout), Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout), _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op), - }; - (val, false) + } } fn binary_int_op( @@ -149,7 +88,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { bin_op: mir::BinOp, left: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> { + ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { use rustc_middle::mir::BinOp::*; // This checks the size, so that we can just assert it below. @@ -169,25 +108,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ShrUnchecked => Some(sym::unchecked_shr), _ => None, }; + let with_overflow = bin_op.is_overflowing(); // Shift ops can have an RHS with a different numeric type. if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) { let size = left.layout.size.bits(); - // The shift offset is implicitly masked to the type size. (This is the one MIR operator - // that does *not* directly map to a single LLVM operation.) Compute how much we - // actually shift and whether there was an overflow due to shifting too much. + // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is + // the one MIR operator that does *not* directly map to a single LLVM operation.) let (shift_amount, overflow) = if right.layout.abi.is_signed() { let shift_amount = r_signed(); let overflow = shift_amount < 0 || shift_amount >= i128::from(size); // Deliberately wrapping `as` casts: shift_amount *can* be negative, but the result // of the `as` will be equal modulo `size` (since it is a power of two). let masked_amount = (shift_amount as u128) % u128::from(size); - assert_eq!(overflow, shift_amount != (masked_amount as i128)); + assert_eq!(overflow, shift_amount != i128::try_from(masked_amount).unwrap()); (masked_amount, overflow) } else { let shift_amount = r_unsigned(); + let overflow = shift_amount >= u128::from(size); let masked_amount = shift_amount % u128::from(size); - (masked_amount, shift_amount != masked_amount) + assert_eq!(overflow, shift_amount != masked_amount); + (masked_amount, overflow) }; let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit // Compute the shifted result. 
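The masking rule implemented above can be checked in isolation: MIR shift operators reduce the right-hand operand modulo the bit width of the left-hand type, and "overflow" merely records whether that reduction changed the value. A standalone sketch of the same arithmetic:

    // Reduce a (possibly signed) shift amount modulo `size_bits`, reporting
    // whether the original amount was out of range. The wrapping `as` cast is
    // sound here because `size_bits` is a power of two, so the cast preserves
    // the value modulo `size_bits`.
    fn masked_shift_amount(size_bits: u32, shift: i128) -> (u32, bool) {
        let overflow = shift < 0 || shift >= i128::from(size_bits);
        let masked = (shift as u128) % u128::from(size_bits);
        (masked as u32, overflow)
    }

    fn main() {
        assert_eq!(masked_shift_amount(32, 5), (5, false));  // in range: unchanged
        assert_eq!(masked_shift_amount(32, 33), (1, true));  // too large: flagged
        assert_eq!(masked_shift_amount(32, -1), (31, true)); // negative: wraps to 31
    }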
@@ -209,19 +150,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ScalarInt::truncate_from_uint(result, left.layout.size).0 }; - if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { - throw_ub_custom!( - fluent::const_eval_overflow_shift, - val = if right.layout.abi.is_signed() { - r_signed().to_string() + if overflow && let Some(intrinsic) = throw_ub_on_overflow { + throw_ub!(ShiftOverflow { + intrinsic, + shift_amount: if right.layout.abi.is_signed() { + Either::Right(r_signed()) } else { - r_unsigned().to_string() - }, - name = intrinsic_name - ); + Either::Left(r_unsigned()) + } + }); } - return Ok((ImmTy::from_scalar_int(result, left.layout), overflow)); + return Ok(ImmTy::from_scalar_int(result, left.layout)); } // For the remaining ops, the types must be the same on both sides @@ -246,7 +186,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => None, }; if let Some(op) = op { - return Ok((ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx), false)); + return Ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx)); } if bin_op == Cmp { return Ok(self.three_way_compare(l_signed(), r_signed())); @@ -256,9 +196,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Rem if r.is_null() => throw_ub!(RemainderByZero), Div => Some(i128::overflowing_div), Rem => Some(i128::overflowing_rem), - Add | AddUnchecked => Some(i128::overflowing_add), - Sub | SubUnchecked => Some(i128::overflowing_sub), - Mul | MulUnchecked => Some(i128::overflowing_mul), + Add | AddUnchecked | AddWithOverflow => Some(i128::overflowing_add), + Sub | SubUnchecked | SubWithOverflow => Some(i128::overflowing_sub), + Mul | MulUnchecked | MulWithOverflow => Some(i128::overflowing_mul), _ => None, }; if let Some(op) = op { @@ -282,10 +222,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // If that truncation loses any information, we have an overflow. let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size); let overflow = oflo || lossy; - if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { - throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name); + if overflow && let Some(intrinsic) = throw_ub_on_overflow { + throw_ub!(ArithOverflow { intrinsic }); } - return Ok((ImmTy::from_scalar_int(result, left.layout), overflow)); + let res = ImmTy::from_scalar_int(result, left.layout); + return Ok(if with_overflow { + let overflow = ImmTy::from_bool(overflow, *self.tcx); + ImmTy::from_pair(res, overflow, *self.tcx) + } else { + res + }); } } // From here on it's okay to treat everything as unsigned. 
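With the new `AddWithOverflow`/`SubWithOverflow`/`MulWithOverflow` arms above, the checked operators produce their `(result, overflowed)` pair directly as a pair immediate (via `ImmTy::from_pair`) instead of routing a separate `bool` back to the caller. The underlying arithmetic, extracted as plain integer math for one width (a sketch, not interpreter code):

    // Do the operation in a wide type, truncate to the target width, and
    // report overflow if either the wide operation wrapped or the truncation
    // was lossy -- the same two conditions the code above combines.
    fn add_with_overflow_u8(l: u8, r: u8) -> (u8, bool) {
        let (wide, wrapped) = u128::overflowing_add(u128::from(l), u128::from(r));
        let truncated = wide as u8;
        let lossy = u128::from(truncated) != wide;
        (truncated, wrapped || lossy)
    }

    fn main() {
        assert_eq!(add_with_overflow_u8(1, 2), (3, false));
        assert_eq!(add_with_overflow_u8(200, 100), (44, true)); // 300 wraps mod 256
        // Matches the standard library's intrinsic-backed helper:
        assert_eq!(u8::overflowing_add(200, 100), (44, true));
    }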
@@ -296,7 +242,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { return Ok(self.three_way_compare(l, r)); } - let val = match bin_op { + Ok(match bin_op { Eq => ImmTy::from_bool(l == r, *self.tcx), Ne => ImmTy::from_bool(l != r, *self.tcx), @@ -309,40 +255,42 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { BitAnd => ImmTy::from_uint(l & r, left.layout), BitXor => ImmTy::from_uint(l ^ r, left.layout), - Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => { + _ => { assert!(!left.layout.abi.is_signed()); let op: fn(u128, u128) -> (u128, bool) = match bin_op { - Add | AddUnchecked => u128::overflowing_add, - Sub | SubUnchecked => u128::overflowing_sub, - Mul | MulUnchecked => u128::overflowing_mul, + Add | AddUnchecked | AddWithOverflow => u128::overflowing_add, + Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub, + Mul | MulUnchecked | MulWithOverflow => u128::overflowing_mul, Div if r == 0 => throw_ub!(DivisionByZero), Rem if r == 0 => throw_ub!(RemainderByZero), Div => u128::overflowing_div, Rem => u128::overflowing_rem, - _ => bug!(), + _ => span_bug!( + self.cur_span(), + "invalid binary op {:?}: {:?}, {:?} (both {})", + bin_op, + left, + right, + right.layout.ty, + ), }; let (result, oflo) = op(l, r); // Truncate to target type. // If that truncation loses any information, we have an overflow. let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size); let overflow = oflo || lossy; - if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { - throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name); + if overflow && let Some(intrinsic) = throw_ub_on_overflow { + throw_ub!(ArithOverflow { intrinsic }); + } + let res = ImmTy::from_scalar_int(result, left.layout); + if with_overflow { + let overflow = ImmTy::from_bool(overflow, *self.tcx); + ImmTy::from_pair(res, overflow, *self.tcx) + } else { + res } - return Ok((ImmTy::from_scalar_int(result, left.layout), overflow)); } - - _ => span_bug!( - self.cur_span(), - "invalid binary op {:?}: {:?}, {:?} (both {})", - bin_op, - left, - right, - right.layout.ty, - ), - }; - - Ok((val, false)) + }) } fn binary_ptr_op( @@ -350,7 +298,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { bin_op: mir::BinOp, left: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> { + ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { use rustc_middle::mir::BinOp::*; match bin_op { @@ -369,10 +317,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?; let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?; - Ok(( - ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout), - false, - )) + Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout)) } // Fall back to machine hook so Miri can support more pointer ops. @@ -380,13 +325,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } - /// Returns the result of the specified operation, and whether it overflowed. - pub fn overflowing_binary_op( + /// Returns the result of the specified operation. + /// + /// Whether this produces a scalar or a pair depends on the specific `bin_op`. 
+ pub fn binary_op( &self, bin_op: mir::BinOp, left: &ImmTy<'tcx, M::Provenance>, right: &ImmTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> { + ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { trace!( "Running binary op {:?}: {:?} ({}), {:?} ({})", bin_op, @@ -458,77 +405,74 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } - #[inline] - pub fn wrapping_binary_op( - &self, - bin_op: mir::BinOp, - left: &ImmTy<'tcx, M::Provenance>, - right: &ImmTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { - let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?; - Ok(val) - } - /// Returns the result of the specified operation, whether it overflowed, and /// the result type. - pub fn overflowing_unary_op( + pub fn unary_op( &self, un_op: mir::UnOp, val: &ImmTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> { + ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { use rustc_middle::mir::UnOp::*; let layout = val.layout; - let val = val.to_scalar(); trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty); match layout.ty.kind() { ty::Bool => { + let val = val.to_scalar(); let val = val.to_bool()?; let res = match un_op { Not => !val, _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op), }; - Ok((ImmTy::from_bool(res, *self.tcx), false)) + Ok(ImmTy::from_bool(res, *self.tcx)) } ty::Float(fty) => { + let val = val.to_scalar(); // No NaN adjustment here, `-` is a bitwise operation! let res = match (un_op, fty) { (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?), (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?), _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op), }; - Ok((ImmTy::from_scalar(res, layout), false)) + Ok(ImmTy::from_scalar(res, layout)) } - _ => { - assert!(layout.ty.is_integral()); + _ if layout.ty.is_integral() => { + let val = val.to_scalar(); let val = val.to_bits(layout.size)?; - let (res, overflow) = match un_op { - Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate + let res = match un_op { + Not => self.truncate(!val, layout), // bitwise negation, then truncate Neg => { // arithmetic negation assert!(layout.abi.is_signed()); let val = self.sign_extend(val, layout) as i128; - let (res, overflow) = val.overflowing_neg(); + let res = val.wrapping_neg(); let res = res as u128; // Truncate to target type. - // If that truncation loses any information, we have an overflow. - let truncated = self.truncate(res, layout); - (truncated, overflow || self.sign_extend(truncated, layout) != res) + self.truncate(res, layout) } + _ => span_bug!(self.cur_span(), "Invalid integer op {:?}", un_op), }; - Ok((ImmTy::from_uint(res, layout), overflow)) + Ok(ImmTy::from_uint(res, layout)) + } + ty::RawPtr(..) 
=> { + assert_eq!(un_op, PtrMetadata); + let (_, meta) = val.to_scalar_and_meta(); + Ok(match meta { + MemPlaceMeta::Meta(scalar) => { + let ty = un_op.ty(*self.tcx, val.layout.ty); + let layout = self.layout_of(ty)?; + ImmTy::from_scalar(scalar, layout) + } + MemPlaceMeta::None => { + let unit_layout = self.layout_of(self.tcx.types.unit)?; + ImmTy::uninit(unit_layout) + } + }) + } + _ => { + bug!("Unexpected unary op argument {val:?}") } } } - - #[inline] - pub fn wrapping_unary_op( - &self, - un_op: mir::UnOp, - val: &ImmTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { - let (val, _overflow) = self.overflowing_unary_op(un_op, val)?; - Ok(val) - } } diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 9ced825853b..4a86ec3f57a 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -5,6 +5,7 @@ use std::assert_matches::assert_matches; use either::{Either, Left, Right}; +use tracing::{instrument, trace}; use rustc_ast::Mutability; use rustc_middle::mir; @@ -76,12 +77,12 @@ impl<Prov: Provenance> MemPlace<Prov> { #[inline] // Not called `offset_with_meta` to avoid confusion with the trait method. - fn offset_with_meta_<'mir, 'tcx, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset_with_meta_<'tcx, M: Machine<'tcx, Provenance = Prov>>( self, offset: Size, mode: OffsetMode, meta: MemPlaceMeta<Prov>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { debug_assert!( !meta.has_meta() || self.meta.has_meta(), @@ -161,20 +162,20 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> { self.mplace.meta } - fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>( &self, offset: Size, mode: OffsetMode, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout }) } - fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn to_op<M: Machine<'tcx, Provenance = Prov>>( &self, - _ecx: &InterpCx<'mir, 'tcx, M>, + _ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { Ok(self.clone().into()) } @@ -273,13 +274,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> { } } - fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>( &self, offset: Size, mode: OffsetMode, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { Ok(match self.as_mplace_or_local() { Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(), @@ -304,9 +305,9 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> { }) } - fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn to_op<M: Machine<'tcx, Provenance = Prov>>( &self, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { ecx.place_to_op(self) } @@ -340,9 +341,9 @@ pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> { &self, ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)>; - fn force_mplace<'mir, M: Machine<'mir, 'tcx, 
Provenance = Prov>>( + fn force_mplace<M: Machine<'tcx, Provenance = Prov>>( &self, - ecx: &mut InterpCx<'mir, 'tcx, M>, + ecx: &mut InterpCx<'tcx, M>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>; } @@ -356,9 +357,9 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> { } #[inline(always)] - fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn force_mplace<M: Machine<'tcx, Provenance = Prov>>( &self, - ecx: &mut InterpCx<'mir, 'tcx, M>, + ecx: &mut InterpCx<'tcx, M>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> { ecx.force_allocation(self) } @@ -373,19 +374,19 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> { } #[inline(always)] - fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn force_mplace<M: Machine<'tcx, Provenance = Prov>>( &self, - _ecx: &mut InterpCx<'mir, 'tcx, M>, + _ecx: &mut InterpCx<'tcx, M>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> { Ok(self.clone()) } } // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 -impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> +impl<'tcx, Prov, M> InterpCx<'tcx, M> where Prov: Provenance, - M: Machine<'mir, 'tcx, Provenance = Prov>, + M: Machine<'tcx, Provenance = Prov>, { pub fn ptr_with_meta_to_mplace( &self, diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index 0a2fedb4840..0e594914c3a 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -18,6 +18,8 @@ use rustc_middle::{bug, span_bug}; use rustc_target::abi::Size; use rustc_target::abi::{self, VariantIdx}; +use tracing::{debug, instrument}; + use super::{ throw_ub, throw_unsup_format, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar, @@ -41,9 +43,9 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug { fn meta(&self) -> MemPlaceMeta<Prov>; /// Get the length of a slice/string/array stored here. - fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn len<M: Machine<'tcx, Provenance = Prov>>( &self, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, u64> { let layout = self.layout(); if layout.is_unsized() { @@ -63,29 +65,29 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug { } /// Offset the value by the given amount, replacing the layout and metadata. 
- fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>( &self, offset: Size, mode: OffsetMode, meta: MemPlaceMeta<Prov>, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self>; - fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn offset<M: Machine<'tcx, Provenance = Prov>>( &self, offset: Size, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { assert!(layout.is_sized()); self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx) } - fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn transmute<M: Machine<'tcx, Provenance = Prov>>( &self, layout: TyAndLayout<'tcx>, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Self> { assert!(self.layout().is_sized() && layout.is_sized()); assert_eq!(self.layout().size, layout.size); @@ -94,9 +96,9 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug { /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for /// reading from this thing. - fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + fn to_op<M: Machine<'tcx, Provenance = Prov>>( &self, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>; } @@ -111,9 +113,9 @@ pub struct ArrayIterator<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx, 'a, Prov, P> { /// Should be the same `ecx` on each call, and match the one used to create the iterator. - pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>( + pub fn next<M: Machine<'tcx, Provenance = Prov>>( &mut self, - ecx: &InterpCx<'mir, 'tcx, M>, + ecx: &InterpCx<'tcx, M>, ) -> InterpResult<'tcx, Option<(u64, P)>> { let Some(idx) = self.range.next() else { return Ok(None) }; // We use `Wrapping` here since the offset has already been checked when the iterator was created. @@ -131,10 +133,10 @@ impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx, } // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 -impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> +impl<'tcx, Prov, M> InterpCx<'tcx, M> where Prov: Provenance, - M: Machine<'mir, 'tcx, Provenance = Prov>, + M: Machine<'tcx, Provenance = Prov>, { /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is /// always possible without allocating, so it can take `&self`. Also return the field's layout. diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index cb72d55a9ba..d0bb821862a 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -3,6 +3,7 @@ //! The main entry point is the `step` method. use either::Either; +use tracing::{info, instrument, trace}; use rustc_index::IndexSlice; use rustc_middle::mir; @@ -15,7 +16,7 @@ use super::{ }; use crate::util; -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Returns `true` as long as there are more things to do. 
/// /// This is used by [priroda](https://github.com/oli-obk/priroda) @@ -167,19 +168,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let left = self.read_immediate(&self.eval_operand(left, layout)?)?; let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout); let right = self.read_immediate(&self.eval_operand(right, layout)?)?; - if let Some(bin_op) = bin_op.overflowing_to_wrapping() { - self.binop_with_overflow(bin_op, &left, &right, &dest)?; - } else { - self.binop_ignore_overflow(bin_op, &left, &right, &dest)?; - } + let result = self.binary_op(bin_op, &left, &right)?; + assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}"); + self.write_immediate(*result, &dest)?; } UnaryOp(un_op, ref operand) => { // The operand always has the same type as the result. let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?; - let val = self.wrapping_unary_op(un_op, &val)?; - assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}"); - self.write_immediate(*val, &dest)?; + let result = self.unary_op(un_op, &val)?; + assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}"); + self.write_immediate(*result, &dest)?; } Aggregate(box ref kind, ref operands) => { diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs index b82c1857858..0649bb5617c 100644 --- a/compiler/rustc_const_eval/src/interpret/terminator.rs +++ b/compiler/rustc_const_eval/src/interpret/terminator.rs @@ -1,6 +1,7 @@ use std::borrow::Cow; use either::Either; +use tracing::trace; use rustc_middle::span_bug; use rustc_middle::{ @@ -45,7 +46,7 @@ impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> { } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the /// original memory occurs. pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> { @@ -97,7 +98,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { for (const_int, target) in targets.iter() { // Compare using MIR BinOp::Eq, to also support pointer values. // (Avoiding `self.binary_op` as that does some redundant layout computation.) - let res = self.wrapping_binary_op( + let res = self.binary_op( mir::BinOp::Eq, &discr, &ImmTy::from_uint(const_int, discr.layout), diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs index b603ef0d27a..244a6ba48a4 100644 --- a/compiler/rustc_const_eval/src/interpret/traits.rs +++ b/compiler/rustc_const_eval/src/interpret/traits.rs @@ -2,11 +2,12 @@ use rustc_middle::mir::interpret::{InterpResult, Pointer}; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_target::abi::{Align, Size}; +use tracing::trace; use super::util::ensure_monomorphic_enough; use super::{InterpCx, Machine}; -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Creates a dynamic vtable for the given type and vtable origin. This is used only for /// objects. 
/// diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs index e304d1e1cc5..10fd6399b9a 100644 --- a/compiler/rustc_const_eval/src/interpret/util.rs +++ b/compiler/rustc_const_eval/src/interpret/util.rs @@ -7,6 +7,7 @@ use rustc_middle::ty::{ self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor, }; use std::ops::ControlFlow; +use tracing::debug; use super::{throw_inval, InterpCx, MPlaceTy, MemPlaceMeta, MemoryKind}; @@ -81,9 +82,9 @@ where } impl<'tcx> InterpretationResult<'tcx> for mir::interpret::ConstAllocation<'tcx> { - fn make_result<'mir>( + fn make_result( mplace: MPlaceTy<'tcx>, - ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, + ecx: &mut InterpCx<'tcx, CompileTimeInterpreter<'tcx>>, ) -> Self { let alloc_id = mplace.ptr().provenance.unwrap().alloc_id(); let alloc = ecx.memory.alloc_map.swap_remove(&alloc_id).unwrap().1; @@ -91,8 +92,8 @@ impl<'tcx> InterpretationResult<'tcx> for mir::interpret::ConstAllocation<'tcx> } } -pub(crate) fn create_static_alloc<'mir, 'tcx: 'mir>( - ecx: &mut CompileTimeEvalContext<'mir, 'tcx>, +pub(crate) fn create_static_alloc<'tcx>( + ecx: &mut CompileTimeEvalContext<'tcx>, static_def_id: LocalDefId, layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>> { diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 2bd4d9dc07a..e35ce9ef28d 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -8,6 +8,7 @@ use std::fmt::Write; use std::num::NonZero; use either::{Left, Right}; +use tracing::trace; use hir::def::DefKind; use rustc_ast::Mutability; @@ -204,7 +205,7 @@ fn write_path(out: &mut String, path: &[PathElem]) { } } -struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { +struct ValidityVisitor<'rt, 'tcx, M: Machine<'tcx>> { /// The `path` may be pushed to, but the part that is present when a function /// starts must not be changed! `visit_fields` and `visit_array` rely on /// this stack discipline. @@ -212,10 +213,10 @@ struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>, /// `None` indicates this is not validating for CTFE (but for runtime). ctfe_mode: Option<CtfeValidationMode>, - ecx: &'rt InterpCx<'mir, 'tcx, M>, + ecx: &'rt InterpCx<'tcx, M>, } -impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> { +impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem { // First, check if we are projecting to a variant. match layout.variants { @@ -434,6 +435,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' found_bytes: has.bytes() }, ); + // Make sure this is non-null. We checked dereferenceability above, but if `size` is zero + // that does not imply non-null. + if self.ecx.scalar_may_be_null(Scalar::from_maybe_pointer(place.ptr(), self.ecx))? { + throw_validation_failure!(self.path, NullPtr { ptr_kind }) + } // Do not allow pointers to uninhabited types. if place.layout.abi.is_uninhabited() { let ty = place.layout.ty; @@ -456,8 +462,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // `!` is a ZST and we want to validate it. 
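// (editorial sketch, not part of this diff) The `scalar_may_be_null` check added
// in the validity.rs hunk above matters precisely when `size == 0`: a zero-sized
// referent makes every pointer "dereferenceable", so nullness must be checked
// separately. A minimal constant this should reject, assuming the usual
// reference validity rules:
//
//     const NULL_ZST_REF: &() = unsafe { std::mem::transmute(0_usize) };
//
// `()` has size zero, so the dereferenceability check alone passes; only the
// explicit null check catches the forged reference.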
if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) { let mut skip_recursive_check = false; - let alloc_actual_mutbl = mutability(self.ecx, alloc_id); - if let GlobalAlloc::Static(did) = self.ecx.tcx.global_alloc(alloc_id) { + if let Some(GlobalAlloc::Static(did)) = self.ecx.tcx.try_get_global_alloc(alloc_id) + { let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else { bug!() }; // Special handling for pointers to statics (irrespective of their type). assert!(!self.ecx.tcx.is_thread_local_static(did)); @@ -495,6 +501,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // If this allocation has size zero, there is no actual mutability here. let (size, _align, _alloc_kind) = self.ecx.get_alloc_info(alloc_id); if size != Size::ZERO { + let alloc_actual_mutbl = mutability(self.ecx, alloc_id); // Mutable pointer to immutable memory is no good. if ptr_expected_mutbl == Mutability::Mut && alloc_actual_mutbl == Mutability::Not @@ -699,10 +706,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' /// Returns whether the allocation is mutable, and whether it's actually a static. /// For "root" statics we look at the type to account for interior /// mutability; for nested statics we have no type and directly use the annotated mutability. -fn mutability<'mir, 'tcx: 'mir>( - ecx: &InterpCx<'mir, 'tcx, impl Machine<'mir, 'tcx>>, - alloc_id: AllocId, -) -> Mutability { +fn mutability<'tcx>(ecx: &InterpCx<'tcx, impl Machine<'tcx>>, alloc_id: AllocId) -> Mutability { // Let's see what kind of memory this points to. // We're not using `try_global_alloc` since dangling pointers have already been handled. match ecx.tcx.global_alloc(alloc_id) { @@ -744,13 +748,11 @@ fn mutability<'mir, 'tcx: 'mir>( } } -impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> - for ValidityVisitor<'rt, 'mir, 'tcx, M> -{ +impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, 'tcx, M> { type V = OpTy<'tcx, M::Provenance>; #[inline(always)] - fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> { + fn ecx(&self) -> &InterpCx<'tcx, M> { self.ecx } @@ -831,6 +833,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> trace!("visit_value: {:?}, {:?}", *op, op.layout); // Check primitive types -- the leaves of our recursive descent. + // We assume that the Scalar validity range does not restrict these values + // any further than `try_visit_primitive` does! if self.try_visit_primitive(op)? { return Ok(()); } @@ -1000,7 +1004,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> } } -impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { +impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { fn validate_operand_internal( &self, op: &OpTy<'tcx, M::Provenance>, diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs index 59bcc5174cb..b812e89854b 100644 --- a/compiler/rustc_const_eval/src/interpret/visitor.rs +++ b/compiler/rustc_const_eval/src/interpret/visitor.rs @@ -6,17 +6,18 @@ use rustc_middle::mir::interpret::InterpResult; use rustc_middle::ty::{self, Ty}; use rustc_target::abi::FieldIdx; use rustc_target::abi::{FieldsShape, VariantIdx, Variants}; +use tracing::trace; use std::num::NonZero; use super::{throw_inval, InterpCx, MPlaceTy, Machine, Projectable}; /// How to traverse a value and what to do when we are at the leaves. 
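// (editorial sketch, not part of this diff) Under the new `'mir`-free signatures,
// a visitor is wired up the same way the `ValidityVisitor` impl above is.
// Assuming `type V` and `ecx` are the only required items (the remaining
// methods appear to be defaulted), a minimal hypothetical impl looks like:
//
//     struct DumpVisitor<'rt, 'tcx, M: Machine<'tcx>> {
//         ecx: &'rt InterpCx<'tcx, M>,
//     }
//     impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M>
//         for DumpVisitor<'rt, 'tcx, M>
//     {
//         type V = OpTy<'tcx, M::Provenance>;
//         fn ecx(&self) -> &InterpCx<'tcx, M> { self.ecx }
//     }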
-pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized { +pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized { type V: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>>; /// The visitor must have an `InterpCx` in it. - fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>; + fn ecx(&self) -> &InterpCx<'tcx, M>; /// `read_discriminant` can be hooked for better error messages. #[inline(always)] diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index a525b838afa..3a7c87c1cad 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -1,9 +1,3 @@ -/*! - -Rust MIR: a lowered representation of Rust. - -*/ - #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![feature(rustdoc_internals)] @@ -20,13 +14,10 @@ Rust MIR: a lowered representation of Rust. #![feature(yeet_expr)] #![feature(if_let_guard)] -#[macro_use] -extern crate tracing; - +pub mod check_consts; pub mod const_eval; mod errors; pub mod interpret; -pub mod transform; pub mod util; use std::sync::atomic::AtomicBool; diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs deleted file mode 100644 index e3582c7d317..00000000000 --- a/compiler/rustc_const_eval/src/transform/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod check_consts; -pub mod validate; diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs deleted file mode 100644 index 3a2b2c5f300..00000000000 --- a/compiler/rustc_const_eval/src/transform/validate.rs +++ /dev/null @@ -1,1397 +0,0 @@ -//! Validates the MIR to ensure that invariants are upheld. - -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_index::bit_set::BitSet; -use rustc_index::IndexVec; -use rustc_infer::traits::Reveal; -use rustc_middle::mir::coverage::CoverageKind; -use rustc_middle::mir::interpret::Scalar; -use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor}; -use rustc_middle::mir::*; -use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance}; -use rustc_middle::{bug, span_bug}; -use rustc_target::abi::{Size, FIRST_VARIANT}; -use rustc_target::spec::abi::Abi; - -use crate::util::is_within_packed; - -use crate::util::relate_types; - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum EdgeKind { - Unwind, - Normal, -} - -pub struct Validator { - /// Describes at which point in the pipeline this validation is happening. - pub when: String, - /// The phase for which we are upholding the dialect. If the given phase forbids a specific - /// element, this validator will now emit errors if that specific element is encountered. - /// Note that phases that change the dialect cause all *following* phases to check the - /// invariants of the new dialect. A phase that changes dialects never checks the new invariants - /// itself. - pub mir_phase: MirPhase, -} - -impl<'tcx> MirPass<'tcx> for Validator { - fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not - // terribly important that they pass the validator. However, I think other passes might - // still see them, in which case they might be surprised. It would probably be better if we - // didn't put this through the MIR pipeline at all. - if matches!(body.source.instance, InstanceDef::Intrinsic(..) 
| InstanceDef::Virtual(..)) { - return; - } - let def_id = body.source.def_id(); - let mir_phase = self.mir_phase; - let param_env = match mir_phase.reveal() { - Reveal::UserFacing => tcx.param_env(def_id), - Reveal::All => tcx.param_env_reveal_all_normalized(def_id), - }; - - let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) { - // In this case `AbortUnwindingCalls` haven't yet been executed. - true - } else if !tcx.def_kind(def_id).is_fn_like() { - true - } else { - let body_ty = tcx.type_of(def_id).skip_binder(); - let body_abi = match body_ty.kind() { - ty::FnDef(..) => body_ty.fn_sig(tcx).abi(), - ty::Closure(..) => Abi::RustCall, - ty::CoroutineClosure(..) => Abi::RustCall, - ty::Coroutine(..) => Abi::Rust, - // No need to do MIR validation on error bodies - ty::Error(_) => return, - _ => { - span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase) - } - }; - - ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi) - }; - - let mut cfg_checker = CfgChecker { - when: &self.when, - body, - tcx, - mir_phase, - unwind_edge_count: 0, - reachable_blocks: traversal::reachable_as_bitset(body), - value_cache: FxHashSet::default(), - can_unwind, - }; - cfg_checker.visit_body(body); - cfg_checker.check_cleanup_control_flow(); - - // Also run the TypeChecker. - for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body, body) { - cfg_checker.fail(location, msg); - } - - if let MirPhase::Runtime(_) = body.phase { - if let ty::InstanceDef::Item(_) = body.source.instance { - if body.has_free_regions() { - cfg_checker.fail( - Location::START, - format!("Free regions in optimized {} MIR", body.phase.name()), - ); - } - } - } - - // Enforce that coroutine-closure layouts are identical. - if let Some(layout) = body.coroutine_layout_raw() - && let Some(by_move_body) = body.coroutine_by_move_body() - && let Some(by_move_layout) = by_move_body.coroutine_layout_raw() - { - // FIXME(async_closures): We could do other validation here? - if layout.variant_fields.len() != by_move_layout.variant_fields.len() { - cfg_checker.fail( - Location::START, - format!( - "Coroutine layout has different number of variant fields from \ - by-move coroutine layout:\n\ - layout: {layout:#?}\n\ - by_move_layout: {by_move_layout:#?}", - ), - ); - } - } - } -} - -struct CfgChecker<'a, 'tcx> { - when: &'a str, - body: &'a Body<'tcx>, - tcx: TyCtxt<'tcx>, - mir_phase: MirPhase, - unwind_edge_count: usize, - reachable_blocks: BitSet<BasicBlock>, - value_cache: FxHashSet<u128>, - // If `false`, then the MIR must not contain `UnwindAction::Continue` or - // `TerminatorKind::Resume`. - can_unwind: bool, -} - -impl<'a, 'tcx> CfgChecker<'a, 'tcx> { - #[track_caller] - fn fail(&self, location: Location, msg: impl AsRef<str>) { - // We might see broken MIR when other errors have already occurred. 
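// (editorial note) In other words, `fail` tolerates broken MIR only on
// compilations that have already reported an error; if `has_errors()` is
// `None`, the `assert!` below fires and the validation message surfaces
// as an ICE.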
- assert!( - self.tcx.dcx().has_errors().is_some(), - "broken MIR in {:?} ({}) at {:?}:\n{}", - self.body.source.instance, - self.when, - location, - msg.as_ref(), - ); - } - - fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) { - if bb == START_BLOCK { - self.fail(location, "start block must not have predecessors") - } - if let Some(bb) = self.body.basic_blocks.get(bb) { - let src = self.body.basic_blocks.get(location.block).unwrap(); - match (src.is_cleanup, bb.is_cleanup, edge_kind) { - // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges - (false, false, EdgeKind::Normal) - // Cleanup blocks can jump to cleanup blocks along non-unwind edges - | (true, true, EdgeKind::Normal) => {} - // Non-cleanup blocks can jump to cleanup blocks along unwind edges - (false, true, EdgeKind::Unwind) => { - self.unwind_edge_count += 1; - } - // All other jumps are invalid - _ => { - self.fail( - location, - format!( - "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})", - edge_kind, - bb, - src.is_cleanup, - bb.is_cleanup, - ) - ) - } - } - } else { - self.fail(location, format!("encountered jump to invalid basic block {bb:?}")) - } - } - - fn check_cleanup_control_flow(&self) { - if self.unwind_edge_count <= 1 { - return; - } - let doms = self.body.basic_blocks.dominators(); - let mut post_contract_node = FxHashMap::default(); - // Reusing the allocation across invocations of the closure - let mut dom_path = vec![]; - let mut get_post_contract_node = |mut bb| { - let root = loop { - if let Some(root) = post_contract_node.get(&bb) { - break *root; - } - let parent = doms.immediate_dominator(bb).unwrap(); - dom_path.push(bb); - if !self.body.basic_blocks[parent].is_cleanup { - break bb; - } - bb = parent; - }; - for bb in dom_path.drain(..) 
{ - post_contract_node.insert(bb, root); - } - root - }; - - let mut parent = IndexVec::from_elem(None, &self.body.basic_blocks); - for (bb, bb_data) in self.body.basic_blocks.iter_enumerated() { - if !bb_data.is_cleanup || !self.reachable_blocks.contains(bb) { - continue; - } - let bb = get_post_contract_node(bb); - for s in bb_data.terminator().successors() { - let s = get_post_contract_node(s); - if s == bb { - continue; - } - let parent = &mut parent[bb]; - match parent { - None => { - *parent = Some(s); - } - Some(e) if *e == s => (), - Some(e) => self.fail( - Location { block: bb, statement_index: 0 }, - format!( - "Cleanup control flow violation: The blocks dominated by {:?} have edges to both {:?} and {:?}", - bb, - s, - *e - ) - ), - } - } - } - - // Check for cycles - let mut stack = FxHashSet::default(); - for i in 0..parent.len() { - let mut bb = BasicBlock::from_usize(i); - stack.clear(); - stack.insert(bb); - loop { - let Some(parent) = parent[bb].take() else { break }; - let no_cycle = stack.insert(parent); - if !no_cycle { - self.fail( - Location { block: bb, statement_index: 0 }, - format!( - "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}", - ), - ); - break; - } - bb = parent; - } - } - } - - fn check_unwind_edge(&mut self, location: Location, unwind: UnwindAction) { - let is_cleanup = self.body.basic_blocks[location.block].is_cleanup; - match unwind { - UnwindAction::Cleanup(unwind) => { - if is_cleanup { - self.fail(location, "`UnwindAction::Cleanup` in cleanup block"); - } - self.check_edge(location, unwind, EdgeKind::Unwind); - } - UnwindAction::Continue => { - if is_cleanup { - self.fail(location, "`UnwindAction::Continue` in cleanup block"); - } - - if !self.can_unwind { - self.fail(location, "`UnwindAction::Continue` in no-unwind function"); - } - } - UnwindAction::Terminate(UnwindTerminateReason::InCleanup) => { - if !is_cleanup { - self.fail( - location, - "`UnwindAction::Terminate(InCleanup)` in a non-cleanup block", - ); - } - } - // These are allowed everywhere. - UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (), - } - } - - fn is_critical_call_edge(&self, target: Option<BasicBlock>, unwind: UnwindAction) -> bool { - let Some(target) = target else { return false }; - matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_)) - && self.body.basic_blocks.predecessors()[target].len() > 1 - } -} - -impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { - fn visit_local(&mut self, local: Local, _context: PlaceContext, location: Location) { - if self.body.local_decls.get(local).is_none() { - self.fail( - location, - format!("local {local:?} has no corresponding declaration in `body.local_decls`"), - ); - } - } - - fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) { - match &statement.kind { - StatementKind::AscribeUserType(..) => { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`AscribeUserType` should have been removed after drop lowering phase", - ); - } - } - StatementKind::FakeRead(..) => { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`FakeRead` should have been removed after drop lowering phase", - ); - } - } - StatementKind::SetDiscriminant { .. } => { - if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) { - self.fail(location, "`SetDiscriminant`is not allowed until deaggregation"); - } - } - StatementKind::Deinit(..) 
=> { - if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) { - self.fail(location, "`Deinit`is not allowed until deaggregation"); - } - } - StatementKind::Retag(kind, _) => { - // FIXME(JakobDegen) The validator should check that `self.mir_phase < - // DropsLowered`. However, this causes ICEs with generation of drop shims, which - // seem to fail to set their `MirPhase` correctly. - if matches!(kind, RetagKind::TwoPhase) { - self.fail(location, format!("explicit `{kind:?}` is forbidden")); - } - } - StatementKind::Coverage(kind) => { - if self.mir_phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup) - && let CoverageKind::BlockMarker { .. } | CoverageKind::SpanMarker { .. } = kind - { - self.fail( - location, - format!("{kind:?} should have been removed after analysis"), - ); - } - } - StatementKind::Assign(..) - | StatementKind::StorageLive(_) - | StatementKind::StorageDead(_) - | StatementKind::Intrinsic(_) - | StatementKind::ConstEvalCounter - | StatementKind::PlaceMention(..) - | StatementKind::Nop => {} - } - - self.super_statement(statement, location); - } - - fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) { - match &terminator.kind { - TerminatorKind::Goto { target } => { - self.check_edge(location, *target, EdgeKind::Normal); - } - TerminatorKind::SwitchInt { targets, discr: _ } => { - for (_, target) in targets.iter() { - self.check_edge(location, target, EdgeKind::Normal); - } - self.check_edge(location, targets.otherwise(), EdgeKind::Normal); - - self.value_cache.clear(); - self.value_cache.extend(targets.iter().map(|(value, _)| value)); - let has_duplicates = targets.iter().len() != self.value_cache.len(); - if has_duplicates { - self.fail( - location, - format!( - "duplicated values in `SwitchInt` terminator: {:?}", - terminator.kind, - ), - ); - } - } - TerminatorKind::Drop { target, unwind, .. } => { - self.check_edge(location, *target, EdgeKind::Normal); - self.check_unwind_edge(location, *unwind); - } - TerminatorKind::Call { args, destination, target, unwind, .. } => { - if let Some(target) = target { - self.check_edge(location, *target, EdgeKind::Normal); - } - self.check_unwind_edge(location, *unwind); - - // The code generation assumes that there are no critical call edges. The assumption - // is used to simplify inserting code that should be executed along the return edge - // from the call. FIXME(tmiasko): Since this is a strictly code generation concern, - // the code generation should be responsible for handling it. - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized) - && self.is_critical_call_edge(*target, *unwind) - { - self.fail( - location, - format!( - "encountered critical edge in `Call` terminator {:?}", - terminator.kind, - ), - ); - } - - // The call destination place and Operand::Move place used as an argument might be - // passed by a reference to the callee. Consequently they cannot be packed. - if is_within_packed(self.tcx, &self.body.local_decls, *destination).is_some() { - // This is bad! The callee will expect the memory to be aligned. - self.fail( - location, - format!( - "encountered packed place in `Call` terminator destination: {:?}", - terminator.kind, - ), - ); - } - for arg in args { - if let Operand::Move(place) = &arg.node { - if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() { - // This is bad! The callee will expect the memory to be aligned. 
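// (editorial example, not from this file) The misalignment hazard in surface terms:
//
//     #[repr(packed)]
//     struct P { a: u8, b: u32 }  // `b` is only 1-aligned inside `P`
//     fn callee(x: u32) { let _ = x; }
//
// `callee(p.b)` must move `p.b` through an aligned temporary; a `Move` operand
// naming the packed field directly would hand the callee an under-aligned
// `u32`, hence the failure below.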
- self.fail( - location, - format!( - "encountered `Move` of a packed place in `Call` terminator: {:?}", - terminator.kind, - ), - ); - } - } - } - } - TerminatorKind::Assert { target, unwind, .. } => { - self.check_edge(location, *target, EdgeKind::Normal); - self.check_unwind_edge(location, *unwind); - } - TerminatorKind::Yield { resume, drop, .. } => { - if self.body.coroutine.is_none() { - self.fail(location, "`Yield` cannot appear outside coroutine bodies"); - } - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail(location, "`Yield` should have been replaced by coroutine lowering"); - } - self.check_edge(location, *resume, EdgeKind::Normal); - if let Some(drop) = drop { - self.check_edge(location, *drop, EdgeKind::Normal); - } - } - TerminatorKind::FalseEdge { real_target, imaginary_target } => { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`FalseEdge` should have been removed after drop elaboration", - ); - } - self.check_edge(location, *real_target, EdgeKind::Normal); - self.check_edge(location, *imaginary_target, EdgeKind::Normal); - } - TerminatorKind::FalseUnwind { real_target, unwind } => { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`FalseUnwind` should have been removed after drop elaboration", - ); - } - self.check_edge(location, *real_target, EdgeKind::Normal); - self.check_unwind_edge(location, *unwind); - } - TerminatorKind::InlineAsm { targets, unwind, .. } => { - for &target in targets { - self.check_edge(location, target, EdgeKind::Normal); - } - self.check_unwind_edge(location, *unwind); - } - TerminatorKind::CoroutineDrop => { - if self.body.coroutine.is_none() { - self.fail(location, "`CoroutineDrop` cannot appear outside coroutine bodies"); - } - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`CoroutineDrop` should have been replaced by coroutine lowering", - ); - } - } - TerminatorKind::UnwindResume => { - let bb = location.block; - if !self.body.basic_blocks[bb].is_cleanup { - self.fail(location, "Cannot `UnwindResume` from non-cleanup basic block") - } - if !self.can_unwind { - self.fail(location, "Cannot `UnwindResume` in a function that cannot unwind") - } - } - TerminatorKind::UnwindTerminate(_) => { - let bb = location.block; - if !self.body.basic_blocks[bb].is_cleanup { - self.fail(location, "Cannot `UnwindTerminate` from non-cleanup basic block") - } - } - TerminatorKind::Return => { - let bb = location.block; - if self.body.basic_blocks[bb].is_cleanup { - self.fail(location, "Cannot `Return` from cleanup basic block") - } - } - TerminatorKind::Unreachable => {} - } - - self.super_terminator(terminator, location); - } - - fn visit_source_scope(&mut self, scope: SourceScope) { - if self.body.source_scopes.get(scope).is_none() { - self.tcx.dcx().span_bug( - self.body.span, - format!( - "broken MIR in {:?} ({}):\ninvalid source scope {:?}", - self.body.source.instance, self.when, scope, - ), - ); - } - } -} - -/// A faster version of the validation pass that only checks those things which may break when -/// instantiating any generic parameters. -/// -/// `caller_body` is used to detect cycles in MIR inlining and MIR validation before -/// `optimized_mir` is available. 
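// (editorial sketch) Usage mirrors the call in `run_pass` above: the caller
// collects the reported failures and routes each one through its own `fail`:
//
//     for (location, msg) in validate_types(tcx, mir_phase, param_env, body, body) {
//         cfg_checker.fail(location, msg);
//     }
//
// (there, `body` is also passed for `caller_body`).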
-pub fn validate_types<'tcx>( - tcx: TyCtxt<'tcx>, - mir_phase: MirPhase, - param_env: ty::ParamEnv<'tcx>, - body: &Body<'tcx>, - caller_body: &Body<'tcx>, -) -> Vec<(Location, String)> { - let mut type_checker = - TypeChecker { body, caller_body, tcx, param_env, mir_phase, failures: Vec::new() }; - type_checker.visit_body(body); - type_checker.failures -} - -struct TypeChecker<'a, 'tcx> { - body: &'a Body<'tcx>, - caller_body: &'a Body<'tcx>, - tcx: TyCtxt<'tcx>, - param_env: ParamEnv<'tcx>, - mir_phase: MirPhase, - failures: Vec<(Location, String)>, -} - -impl<'a, 'tcx> TypeChecker<'a, 'tcx> { - fn fail(&mut self, location: Location, msg: impl Into<String>) { - self.failures.push((location, msg.into())); - } - - /// Check if src can be assigned into dest. - /// This is not precise, it will accept some incorrect assignments. - fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool { - // Fast path before we normalize. - if src == dest { - // Equal types, all is good. - return true; - } - - // We sometimes have to use `defining_opaque_types` for subtyping - // to succeed here and figuring out how exactly that should work - // is annoying. It is harmless enough to just not validate anything - // in that case. We still check this after analysis as all opaque - // types have been revealed at this point. - if (src, dest).has_opaque_types() { - return true; - } - - // After borrowck subtyping should be fully explicit via - // `Subtype` projections. - let variance = if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - Variance::Invariant - } else { - Variance::Covariant - }; - - crate::util::relate_types(self.tcx, self.param_env, variance, src, dest) - } -} - -impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { - fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { - // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed. - if self.tcx.sess.opts.unstable_opts.validate_mir - && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) - { - // `Operand::Copy` is only supposed to be used with `Copy` types. 
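// (editorial example, hypothetical MIR-dump notation) With `_1: u32`, a
// statement like `_2 = copy _1` is fine, but with `_1: String` (not `Copy`)
// the same statement should be reported by the check below.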
- if let Operand::Copy(place) = operand { - let ty = place.ty(&self.body.local_decls, self.tcx).ty; - - if !ty.is_copy_modulo_regions(self.tcx, self.param_env) { - self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}")); - } - } - } - - self.super_operand(operand, location); - } - - fn visit_projection_elem( - &mut self, - place_ref: PlaceRef<'tcx>, - elem: PlaceElem<'tcx>, - context: PlaceContext, - location: Location, - ) { - match elem { - ProjectionElem::OpaqueCast(ty) - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) => - { - self.fail( - location, - format!("explicit opaque type cast to `{ty}` after `RevealAll`"), - ) - } - ProjectionElem::Index(index) => { - let index_ty = self.body.local_decls[index].ty; - if index_ty != self.tcx.types.usize { - self.fail(location, format!("bad index ({index_ty:?} != usize)")) - } - } - ProjectionElem::Deref - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) => - { - let base_ty = place_ref.ty(&self.body.local_decls, self.tcx).ty; - - if base_ty.is_box() { - self.fail( - location, - format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"), - ) - } - } - ProjectionElem::Field(f, ty) => { - let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx); - let fail_out_of_bounds = |this: &mut Self, location| { - this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}")); - }; - let check_equal = |this: &mut Self, location, f_ty| { - if !this.mir_assign_valid_types(ty, f_ty) { - this.fail( - location, - format!( - "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`" - ) - ) - } - }; - - let kind = match parent_ty.ty.kind() { - &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => { - self.tcx.type_of(def_id).instantiate(self.tcx, args).kind() - } - kind => kind, - }; - - match kind { - ty::Tuple(fields) => { - let Some(f_ty) = fields.get(f.as_usize()) else { - fail_out_of_bounds(self, location); - return; - }; - check_equal(self, location, *f_ty); - } - ty::Adt(adt_def, args) => { - let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT); - let Some(field) = adt_def.variant(var).fields.get(f) else { - fail_out_of_bounds(self, location); - return; - }; - check_equal(self, location, field.ty(self.tcx, args)); - } - ty::Closure(_, args) => { - let args = args.as_closure(); - let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else { - fail_out_of_bounds(self, location); - return; - }; - check_equal(self, location, f_ty); - } - ty::CoroutineClosure(_, args) => { - let args = args.as_coroutine_closure(); - let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else { - fail_out_of_bounds(self, location); - return; - }; - check_equal(self, location, f_ty); - } - &ty::Coroutine(def_id, args) => { - let f_ty = if let Some(var) = parent_ty.variant_index { - // If we're currently validating an inlined copy of this body, - // then it will no longer be parameterized over the original - // args of the coroutine. Otherwise, we prefer to use this body - // since we may be in the process of computing this MIR in the - // first place. - let layout = if def_id == self.caller_body.source.def_id() { - // FIXME: This is not right for async closures. 
- self.caller_body.coroutine_layout_raw() - } else { - self.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) - }; - - let Some(layout) = layout else { - self.fail( - location, - format!("No coroutine layout for {parent_ty:?}"), - ); - return; - }; - - let Some(&local) = layout.variant_fields[var].get(f) else { - fail_out_of_bounds(self, location); - return; - }; - - let Some(f_ty) = layout.field_tys.get(local) else { - self.fail( - location, - format!("Out of bounds local {local:?} for {parent_ty:?}"), - ); - return; - }; - - ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args) - } else { - let Some(&f_ty) = args.as_coroutine().prefix_tys().get(f.index()) - else { - fail_out_of_bounds(self, location); - return; - }; - - f_ty - }; - - check_equal(self, location, f_ty); - } - _ => { - self.fail(location, format!("{:?} does not have fields", parent_ty.ty)); - } - } - } - ProjectionElem::Subtype(ty) => { - if !relate_types( - self.tcx, - self.param_env, - Variance::Covariant, - ty, - place_ref.ty(&self.body.local_decls, self.tcx).ty, - ) { - self.fail( - location, - format!( - "Failed subtyping {ty:#?} and {:#?}", - place_ref.ty(&self.body.local_decls, self.tcx).ty - ), - ) - } - } - _ => {} - } - self.super_projection_elem(place_ref, elem, context, location); - } - - fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) { - if let Some(box VarDebugInfoFragment { ty, ref projection }) = debuginfo.composite { - if ty.is_union() || ty.is_enum() { - self.fail( - START_BLOCK.start_location(), - format!("invalid type {ty:?} in debuginfo for {:?}", debuginfo.name), - ); - } - if projection.is_empty() { - self.fail( - START_BLOCK.start_location(), - format!("invalid empty projection in debuginfo for {:?}", debuginfo.name), - ); - } - if projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) { - self.fail( - START_BLOCK.start_location(), - format!( - "illegal projection {:?} in debuginfo for {:?}", - projection, debuginfo.name - ), - ); - } - } - match debuginfo.value { - VarDebugInfoContents::Const(_) => {} - VarDebugInfoContents::Place(place) => { - if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) { - self.fail( - START_BLOCK.start_location(), - format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name), - ); - } - } - } - self.super_var_debug_info(debuginfo); - } - - fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) { - // Set off any `bug!`s in the type computation code - let _ = place.ty(&self.body.local_decls, self.tcx); - - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) - && place.projection.len() > 1 - && cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo) - && place.projection[1..].contains(&ProjectionElem::Deref) - { - self.fail(location, format!("{place:?}, has deref at the wrong place")); - } - - self.super_place(place, cntxt, location); - } - - fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { - macro_rules! 
check_kinds { - ($t:expr, $text:literal, $typat:pat) => { - if !matches!(($t).kind(), $typat) { - self.fail(location, format!($text, $t)); - } - }; - } - match rvalue { - Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {} - Rvalue::Aggregate(kind, fields) => match **kind { - AggregateKind::Tuple => {} - AggregateKind::Array(dest) => { - for src in fields { - if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) { - self.fail(location, "array field has the wrong type"); - } - } - } - AggregateKind::Adt(def_id, idx, args, _, Some(field)) => { - let adt_def = self.tcx.adt_def(def_id); - assert!(adt_def.is_union()); - assert_eq!(idx, FIRST_VARIANT); - let dest_ty = self.tcx.normalize_erasing_regions( - self.param_env, - adt_def.non_enum_variant().fields[field].ty(self.tcx, args), - ); - if fields.len() == 1 { - let src_ty = fields.raw[0].ty(self.body, self.tcx); - if !self.mir_assign_valid_types(src_ty, dest_ty) { - self.fail(location, "union field has the wrong type"); - } - } else { - self.fail(location, "unions should have one initialized field"); - } - } - AggregateKind::Adt(def_id, idx, args, _, None) => { - let adt_def = self.tcx.adt_def(def_id); - assert!(!adt_def.is_union()); - let variant = &adt_def.variants()[idx]; - if variant.fields.len() != fields.len() { - self.fail(location, "adt has the wrong number of initialized fields"); - } - for (src, dest) in std::iter::zip(fields, &variant.fields) { - let dest_ty = self - .tcx - .normalize_erasing_regions(self.param_env, dest.ty(self.tcx, args)); - if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest_ty) { - self.fail(location, "adt field has the wrong type"); - } - } - } - AggregateKind::Closure(_, args) => { - let upvars = args.as_closure().upvar_tys(); - if upvars.len() != fields.len() { - self.fail(location, "closure has the wrong number of initialized fields"); - } - for (src, dest) in std::iter::zip(fields, upvars) { - if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) { - self.fail(location, "closure field has the wrong type"); - } - } - } - AggregateKind::Coroutine(_, args) => { - let upvars = args.as_coroutine().upvar_tys(); - if upvars.len() != fields.len() { - self.fail(location, "coroutine has the wrong number of initialized fields"); - } - for (src, dest) in std::iter::zip(fields, upvars) { - if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) { - self.fail(location, "coroutine field has the wrong type"); - } - } - } - AggregateKind::CoroutineClosure(_, args) => { - let upvars = args.as_coroutine_closure().upvar_tys(); - if upvars.len() != fields.len() { - self.fail( - location, - "coroutine-closure has the wrong number of initialized fields", - ); - } - for (src, dest) in std::iter::zip(fields, upvars) { - if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) { - self.fail(location, "coroutine-closure field has the wrong type"); - } - } - } - AggregateKind::RawPtr(pointee_ty, mutability) => { - if !matches!(self.mir_phase, MirPhase::Runtime(_)) { - // It would probably be fine to support this in earlier phases, - // but at the time of writing it's only ever introduced from intrinsic lowering, - // so earlier things just `bug!` on it. 
- self.fail(location, "RawPtr should be in runtime MIR only"); - } - - if fields.len() != 2 { - self.fail(location, "raw pointer aggregate must have 2 fields"); - } else { - let data_ptr_ty = fields.raw[0].ty(self.body, self.tcx); - let metadata_ty = fields.raw[1].ty(self.body, self.tcx); - if let ty::RawPtr(in_pointee, in_mut) = data_ptr_ty.kind() { - if *in_mut != mutability { - self.fail(location, "input and output mutability must match"); - } - - // FIXME: check `Thin` instead of `Sized` - if !in_pointee.is_sized(self.tcx, self.param_env) { - self.fail(location, "input pointer must be thin"); - } - } else { - self.fail( - location, - "first operand to raw pointer aggregate must be a raw pointer", - ); - } - - // FIXME: Check metadata more generally - if pointee_ty.is_slice() { - if !self.mir_assign_valid_types(metadata_ty, self.tcx.types.usize) { - self.fail(location, "slice metadata must be usize"); - } - } else if pointee_ty.is_sized(self.tcx, self.param_env) { - if metadata_ty != self.tcx.types.unit { - self.fail(location, "metadata for pointer-to-thin must be unit"); - } - } - } - } - }, - Rvalue::Ref(_, BorrowKind::Fake(_), _) => { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`Assign` statement with a `Fake` borrow should have been removed in runtime MIR", - ); - } - } - Rvalue::Ref(..) => {} - Rvalue::Len(p) => { - let pty = p.ty(&self.body.local_decls, self.tcx).ty; - check_kinds!( - pty, - "Cannot compute length of non-array type {:?}", - ty::Array(..) | ty::Slice(..) - ); - } - Rvalue::BinaryOp(op, vals) => { - use BinOp::*; - let a = vals.0.ty(&self.body.local_decls, self.tcx); - let b = vals.1.ty(&self.body.local_decls, self.tcx); - if crate::util::binop_right_homogeneous(*op) { - if let Eq | Lt | Le | Ne | Ge | Gt = op { - // The function pointer types can have lifetimes - if !self.mir_assign_valid_types(a, b) { - self.fail( - location, - format!("Cannot {op:?} compare incompatible types {a:?} and {b:?}"), - ); - } - } else if a != b { - self.fail( - location, - format!( - "Cannot perform binary op {op:?} on unequal types {a:?} and {b:?}" - ), - ); - } - } - - match op { - Offset => { - check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..)); - if b != self.tcx.types.isize && b != self.tcx.types.usize { - self.fail(location, format!("Cannot offset by non-isize type {b:?}")); - } - } - Eq | Lt | Le | Ne | Ge | Gt => { - for x in [a, b] { - check_kinds!( - x, - "Cannot {op:?} compare type {:?}", - ty::Bool - | ty::Char - | ty::Int(..) - | ty::Uint(..) - | ty::Float(..) - | ty::RawPtr(..) - | ty::FnPtr(..) - ) - } - } - Cmp => { - for x in [a, b] { - check_kinds!( - x, - "Cannot three-way compare non-integer type {:?}", - ty::Char | ty::Uint(..) | ty::Int(..) - ) - } - } - AddUnchecked | AddWithOverflow | SubUnchecked | SubWithOverflow - | MulUnchecked | MulWithOverflow | Shl | ShlUnchecked | Shr | ShrUnchecked => { - for x in [a, b] { - check_kinds!( - x, - "Cannot {op:?} non-integer type {:?}", - ty::Uint(..) | ty::Int(..) - ) - } - } - BitAnd | BitOr | BitXor => { - for x in [a, b] { - check_kinds!( - x, - "Cannot perform bitwise op {op:?} on type {:?}", - ty::Uint(..) | ty::Int(..) | ty::Bool - ) - } - } - Add | Sub | Mul | Div | Rem => { - for x in [a, b] { - check_kinds!( - x, - "Cannot perform arithmetic {op:?} on type {:?}", - ty::Uint(..) | ty::Int(..) | ty::Float(..) 
- ) - } - } - } - } - Rvalue::UnaryOp(op, operand) => { - let a = operand.ty(&self.body.local_decls, self.tcx); - match op { - UnOp::Neg => { - check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..)) - } - UnOp::Not => { - check_kinds!( - a, - "Cannot binary not type {:?}", - ty::Int(..) | ty::Uint(..) | ty::Bool - ); - } - } - } - Rvalue::ShallowInitBox(operand, _) => { - let a = operand.ty(&self.body.local_decls, self.tcx); - check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..)); - } - Rvalue::Cast(kind, operand, target_type) => { - let op_ty = operand.ty(self.body, self.tcx); - match kind { - CastKind::DynStar => { - // FIXME(dyn-star): make sure nothing needs to be done here. - } - // FIXME: Add Checks for these - CastKind::PointerWithExposedProvenance - | CastKind::PointerExposeProvenance - | CastKind::PointerCoercion(_) => {} - CastKind::IntToInt | CastKind::IntToFloat => { - let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool(); - let target_valid = target_type.is_numeric() || target_type.is_char(); - if !input_valid || !target_valid { - self.fail( - location, - format!("Wrong cast kind {kind:?} for the type {op_ty}",), - ); - } - } - CastKind::FnPtrToPtr | CastKind::PtrToPtr => { - if !(op_ty.is_any_ptr() && target_type.is_unsafe_ptr()) { - self.fail(location, "Can't cast {op_ty} into 'Ptr'"); - } - } - CastKind::FloatToFloat | CastKind::FloatToInt => { - if !op_ty.is_floating_point() || !target_type.is_numeric() { - self.fail( - location, - format!( - "Trying to cast non 'Float' as {kind:?} into {target_type:?}" - ), - ); - } - } - CastKind::Transmute => { - if let MirPhase::Runtime(..) = self.mir_phase { - // Unlike `mem::transmute`, a MIR `Transmute` is well-formed - // for any two `Sized` types, just potentially UB to run. 
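// (editorial example) e.g. `std::mem::transmute::<u8, u64>(x)` is rejected at
// compile time for its size mismatch, but a MIR-level `Transmute` cast from
// `u8` to `u64` passes this validator -- executing it is UB, which is exactly
// the "well-formed but potentially UB" contract described above; only the
// `Sized` requirements below are enforced.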
- - if !self - .tcx - .normalize_erasing_regions(self.param_env, op_ty) - .is_sized(self.tcx, self.param_env) - { - self.fail( - location, - format!("Cannot transmute from non-`Sized` type {op_ty:?}"), - ); - } - if !self - .tcx - .normalize_erasing_regions(self.param_env, *target_type) - .is_sized(self.tcx, self.param_env) - { - self.fail( - location, - format!("Cannot transmute to non-`Sized` type {target_type:?}"), - ); - } - } else { - self.fail( - location, - format!( - "Transmute is not supported in non-runtime phase {:?}.", - self.mir_phase - ), - ); - } - } - } - } - Rvalue::NullaryOp(NullOp::OffsetOf(indices), container) => { - let fail_out_of_bounds = |this: &mut Self, location, field, ty| { - this.fail(location, format!("Out of bounds field {field:?} for {ty:?}")); - }; - - let mut current_ty = *container; - - for (variant, field) in indices.iter() { - match current_ty.kind() { - ty::Tuple(fields) => { - if variant != FIRST_VARIANT { - self.fail( - location, - format!("tried to get variant {variant:?} of tuple"), - ); - return; - } - let Some(&f_ty) = fields.get(field.as_usize()) else { - fail_out_of_bounds(self, location, field, current_ty); - return; - }; - - current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty); - } - ty::Adt(adt_def, args) => { - let Some(field) = adt_def.variant(variant).fields.get(field) else { - fail_out_of_bounds(self, location, field, current_ty); - return; - }; - - let f_ty = field.ty(self.tcx, args); - current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty); - } - _ => { - self.fail( - location, - format!("Cannot get offset ({variant:?}, {field:?}) from type {current_ty:?}"), - ); - return; - } - } - } - } - Rvalue::Repeat(_, _) - | Rvalue::ThreadLocalRef(_) - | Rvalue::AddressOf(_, _) - | Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf | NullOp::UbChecks, _) - | Rvalue::Discriminant(_) => {} - } - self.super_rvalue(rvalue, location); - } - - fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) { - match &statement.kind { - StatementKind::Assign(box (dest, rvalue)) => { - // LHS and RHS of the assignment must have the same type. - let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty; - let right_ty = rvalue.ty(&self.body.local_decls, self.tcx); - - if !self.mir_assign_valid_types(right_ty, left_ty) { - self.fail( - location, - format!( - "encountered `{:?}` with incompatible types:\n\ - left-hand side has type: {}\n\ - right-hand side has type: {}", - statement.kind, left_ty, right_ty, - ), - ); - } - if let Rvalue::CopyForDeref(place) = rvalue { - if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none() { - self.fail( - location, - "`CopyForDeref` should only be used for dereferenceable types", - ) - } - } - } - StatementKind::AscribeUserType(..) => { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`AscribeUserType` should have been removed after drop lowering phase", - ); - } - } - StatementKind::FakeRead(..) 
=> { - if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) { - self.fail( - location, - "`FakeRead` should have been removed after drop lowering phase", - ); - } - } - StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => { - let ty = op.ty(&self.body.local_decls, self.tcx); - if !ty.is_bool() { - self.fail( - location, - format!("`assume` argument must be `bool`, but got: `{ty}`"), - ); - } - } - StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping( - CopyNonOverlapping { src, dst, count }, - )) => { - let src_ty = src.ty(&self.body.local_decls, self.tcx); - let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) { - src_deref - } else { - self.fail( - location, - format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"), - ); - return; - }; - let dst_ty = dst.ty(&self.body.local_decls, self.tcx); - let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) { - dst_deref - } else { - self.fail( - location, - format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"), - ); - return; - }; - // since CopyNonOverlapping is parametrized by 1 type, - // we only need to check that they are equal and not keep an extra parameter. - if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) { - self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})")); - } - - let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx); - if op_cnt_ty != self.tcx.types.usize { - self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)")) - } - } - StatementKind::SetDiscriminant { place, .. } => { - if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) { - self.fail(location, "`SetDiscriminant`is not allowed until deaggregation"); - } - let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind(); - if !matches!(pty, ty::Adt(..) | ty::Coroutine(..) | ty::Alias(ty::Opaque, ..)) { - self.fail( - location, - format!( - "`SetDiscriminant` is only allowed on ADTs and coroutines, not {pty:?}" - ), - ); - } - } - StatementKind::Deinit(..) => { - if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) { - self.fail(location, "`Deinit`is not allowed until deaggregation"); - } - } - StatementKind::Retag(kind, _) => { - // FIXME(JakobDegen) The validator should check that `self.mir_phase < - // DropsLowered`. However, this causes ICEs with generation of drop shims, which - // seem to fail to set their `MirPhase` correctly. - if matches!(kind, RetagKind::TwoPhase) { - self.fail(location, format!("explicit `{kind:?}` is forbidden")); - } - } - StatementKind::StorageLive(_) - | StatementKind::StorageDead(_) - | StatementKind::Coverage(_) - | StatementKind::ConstEvalCounter - | StatementKind::PlaceMention(..) 
- | StatementKind::Nop => {} - } - - self.super_statement(statement, location); - } - - fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) { - match &terminator.kind { - TerminatorKind::SwitchInt { targets, discr } => { - let switch_ty = discr.ty(&self.body.local_decls, self.tcx); - - let target_width = self.tcx.sess.target.pointer_width; - - let size = Size::from_bits(match switch_ty.kind() { - ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(), - ty::Int(int) => int.normalize(target_width).bit_width().unwrap(), - ty::Char => 32, - ty::Bool => 1, - other => bug!("unhandled type: {:?}", other), - }); - - for (value, _) in targets.iter() { - if Scalar::<()>::try_from_uint(value, size).is_none() { - self.fail( - location, - format!("the value {value:#x} is not a proper {switch_ty:?}"), - ) - } - } - } - TerminatorKind::Call { func, .. } => { - let func_ty = func.ty(&self.body.local_decls, self.tcx); - match func_ty.kind() { - ty::FnPtr(..) | ty::FnDef(..) => {} - _ => self.fail( - location, - format!("encountered non-callable type {func_ty} in `Call` terminator"), - ), - } - } - TerminatorKind::Assert { cond, .. } => { - let cond_ty = cond.ty(&self.body.local_decls, self.tcx); - if cond_ty != self.tcx.types.bool { - self.fail( - location, - format!( - "encountered non-boolean condition of type {cond_ty} in `Assert` terminator" - ), - ); - } - } - TerminatorKind::Goto { .. } - | TerminatorKind::Drop { .. } - | TerminatorKind::Yield { .. } - | TerminatorKind::FalseEdge { .. } - | TerminatorKind::FalseUnwind { .. } - | TerminatorKind::InlineAsm { .. } - | TerminatorKind::CoroutineDrop - | TerminatorKind::UnwindResume - | TerminatorKind::UnwindTerminate(_) - | TerminatorKind::Return - | TerminatorKind::Unreachable => {} - } - - self.super_terminator(terminator, location); - } -} diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs index 8642dfccd78..528274e6aba 100644 --- a/compiler/rustc_const_eval/src/util/alignment.rs +++ b/compiler/rustc_const_eval/src/util/alignment.rs @@ -1,6 +1,7 @@ use rustc_middle::mir::*; use rustc_middle::ty::{self, TyCtxt}; use rustc_target::abi::Align; +use tracing::debug; /// Returns `true` if this place is allowed to be less aligned /// than its containing struct (because it is within a packed diff --git a/compiler/rustc_const_eval/src/util/caller_location.rs b/compiler/rustc_const_eval/src/util/caller_location.rs index 403bc1eca13..62c5f8734a2 100644 --- a/compiler/rustc_const_eval/src/util/caller_location.rs +++ b/compiler/rustc_const_eval/src/util/caller_location.rs @@ -5,13 +5,14 @@ use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Mutability}; use rustc_span::symbol::Symbol; +use tracing::trace; use crate::const_eval::{mk_eval_cx_to_read_const_val, CanAccessMutGlobal, CompileTimeEvalContext}; use crate::interpret::*; /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers. -fn alloc_caller_location<'mir, 'tcx>( - ecx: &mut CompileTimeEvalContext<'mir, 'tcx>, +fn alloc_caller_location<'tcx>( + ecx: &mut CompileTimeEvalContext<'tcx>, filename: Symbol, line: u32, col: u32,  | 
