From 522af10cccd9c23f5147d123cd6373ae0aa0795d Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 5 Aug 2024 17:34:44 +0200
Subject: interpret: refactor function call handling to be better-abstracted

---
 .../src/const_eval/eval_queries.rs                  |    4 +-
 compiler/rustc_const_eval/src/const_eval/machine.rs |   17 +-
 compiler/rustc_const_eval/src/interpret/call.rs     |  978 ++++++++++++++++++
 .../rustc_const_eval/src/interpret/eval_context.rs  |  763 +-------------
 compiler/rustc_const_eval/src/interpret/machine.rs  |    2 +-
 compiler/rustc_const_eval/src/interpret/mod.rs      |    8 +-
 compiler/rustc_const_eval/src/interpret/operand.rs  |    1 +
 compiler/rustc_const_eval/src/interpret/stack.rs    |  638 ++++++++++++
 compiler/rustc_const_eval/src/interpret/step.rs     |  250 ++++-
 .../rustc_const_eval/src/interpret/terminator.rs    | 1073 --------------------
 10 files changed, 1883 insertions(+), 1851 deletions(-)
 create mode 100644 compiler/rustc_const_eval/src/interpret/call.rs
 create mode 100644 compiler/rustc_const_eval/src/interpret/stack.rs
 delete mode 100644 compiler/rustc_const_eval/src/interpret/terminator.rs

diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 6d5bca57313..ff27e400016 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -73,7 +73,9 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
         cid.promoted.map_or_else(String::new, |p| format!("::{p:?}"))
     );
 
-    ecx.push_stack_frame(
+    // This can't use `init_stack_frame` since `body` is not a function,
+    // so computing its ABI would fail. It's also not worth it since there are no arguments to pass.
+    ecx.push_stack_frame_raw(
         cid.instance,
         body,
         &ret.clone().into(),
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 79e8e212776..a075bdc1911 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -24,8 +24,9 @@ use crate::errors::{LongRunning, LongRunningWarn};
 use crate::fluent_generated as fluent;
 use crate::interpret::{
     self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, throw_unsup,
-    throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal, Frame,
+    throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
     GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar,
+    StackPopCleanup,
 };
 
 /// When hitting this many interpreted terminators we emit a deny by default lint
@@ -306,17 +307,15 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
             let align = ImmTy::from_uint(target_align, args[1].layout).into();
             let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
 
-            // We replace the entire function call with a "tail call".
-            // Note that this happens before the frame of the original function
-            // is pushed on the stack.
-            self.eval_fn_call(
-                FnVal::Instance(instance),
-                (CallAbi::Rust, fn_abi),
+            // Push the stack frame with our own adjusted arguments.
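+            // (Unlike the old `eval_fn_call`-based "tail call" above, this pushes the callee
+            // frame directly; the return target and unwind action now travel in `StackPopCleanup`.)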
+            self.init_stack_frame(
+                instance,
+                self.load_mir(instance.def, None)?,
+                fn_abi,
                 &[FnArg::Copy(addr), FnArg::Copy(align)],
                 /* with_caller_location = */ false,
                 dest,
-                ret,
-                mir::UnwindAction::Unreachable,
+                StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Unreachable },
             )?;
             Ok(ControlFlow::Break(()))
         } else {
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
new file mode 100644
index 00000000000..9d2870a4a31
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -0,0 +1,978 @@
+//! Manages calling a concrete function (with known MIR body) with argument passing,
+//! and returning the return value to the caller.
+use std::borrow::Cow;
+
+use either::{Left, Right};
+use rustc_middle::ty::layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, AdtDef, Instance, Ty};
+use rustc_middle::{bug, mir, span_bug};
+use rustc_span::sym;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::{self, FieldIdx, Integer};
+use rustc_target::spec::abi::Abi;
+use tracing::{info, instrument, trace};
+
+use super::{
+    throw_ub, throw_ub_custom, throw_unsup_format, CtfeProvenance, FnVal, ImmTy, InterpCx,
+    InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, ReturnAction, Scalar,
+    StackPopCleanup, StackPopInfo,
+};
+use crate::fluent_generated as fluent;
+
+/// An argument passed to a function.
+#[derive(Clone, Debug)]
+pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
+    /// Pass a copy of the given operand.
+    Copy(OpTy<'tcx, Prov>),
+    /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
+    /// make the place inaccessible for the duration of the function call.
+    InPlace(MPlaceTy<'tcx, Prov>),
+}
+
+impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
+    pub fn layout(&self) -> &TyAndLayout<'tcx> {
+        match self {
+            FnArg::Copy(op) => &op.layout,
+            FnArg::InPlace(mplace) => &mplace.layout,
+        }
+    }
+}
+
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+    /// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the
+    /// original memory occurs.
+    pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
+        match arg {
+            FnArg::Copy(op) => op.clone(),
+            FnArg::InPlace(mplace) => mplace.clone().into(),
+        }
+    }
+
+    /// Make a copy of the given fn_args. Any `InPlace` are degenerated to copies, no protection of the
+    /// original memory occurs.
+    pub fn copy_fn_args(
+        &self,
+        args: &[FnArg<'tcx, M::Provenance>],
+    ) -> Vec<OpTy<'tcx, M::Provenance>> {
+        args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
+    }
+
+    /// Helper function for argument untupling.
+    pub(super) fn fn_arg_field(
+        &self,
+        arg: &FnArg<'tcx, M::Provenance>,
+        field: usize,
+    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
+        Ok(match arg {
+            FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
+            FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
+        })
+    }
+
+    /// Find the wrapped inner type of a transparent wrapper.
+    /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
+    ///
+    /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
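+    /// (Illustrative: for a hypothetical `#[repr(transparent)] struct Meters(u32)`, this
+    /// returns the layout of `u32`, recursing through nested transparent wrappers.)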
+    fn unfold_transparent(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
+    ) -> TyAndLayout<'tcx> {
+        match layout.ty.kind() {
+            ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
+                assert!(!adt_def.is_enum());
+                // Find the non-1-ZST field, and recurse.
+                let (_, field) = layout.non_1zst_field(self).unwrap();
+                self.unfold_transparent(field, may_unfold)
+            }
+            // Not a transparent type, no further unfolding.
+            _ => layout,
+        }
+    }
+
+    /// Unwrap types that are guaranteed a null-pointer-optimization
+    fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        // Check if this is `Option` wrapping some type or if this is `Result` wrapping a 1-ZST and
+        // another type.
+        let ty::Adt(def, args) = layout.ty.kind() else {
+            // Not an ADT, so definitely no NPO.
+            return Ok(layout);
+        };
+        let inner = if self.tcx.is_diagnostic_item(sym::Option, def.did()) {
+            // The wrapped type is the only arg.
+            self.layout_of(args[0].as_type().unwrap())?
+        } else if self.tcx.is_diagnostic_item(sym::Result, def.did()) {
+            // We want to extract which (if any) of the args is not a 1-ZST.
+            let lhs = self.layout_of(args[0].as_type().unwrap())?;
+            let rhs = self.layout_of(args[1].as_type().unwrap())?;
+            if lhs.is_1zst() {
+                rhs
+            } else if rhs.is_1zst() {
+                lhs
+            } else {
+                return Ok(layout); // no NPO
+            }
+        } else {
+            return Ok(layout); // no NPO
+        };
+
+        // Check if the inner type is one of the NPO-guaranteed ones.
+        // For that we first unpeel transparent *structs* (but not unions).
+        let is_npo = |def: AdtDef<'tcx>| {
+            self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
+        };
+        let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
+            // Stop at NPO types so that we don't miss that attribute in the check below!
+            def.is_struct() && !is_npo(def)
+        });
+        Ok(match inner.ty.kind() {
+            ty::Ref(..) | ty::FnPtr(..) => {
+                // Option<&T> behaves like &T, and same for fn()
+                inner
+            }
+            ty::Adt(def, _) if is_npo(*def) => {
+                // Once we found a `nonnull_optimization_guaranteed` type, further strip off
+                // newtype structs from it to find the underlying ABI type.
+                self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
+            }
+            _ => {
+                // Everything else we do not unfold.
+                layout
+            }
+        })
+    }
+
+    /// Check if these two layouts look like they are fn-ABI-compatible.
+    /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
+    /// that only checking the `PassMode` is insufficient.)
+    fn layout_compat(
+        &self,
+        caller: TyAndLayout<'tcx>,
+        callee: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, bool> {
+        // Fast path: equal types are definitely compatible.
+        if caller.ty == callee.ty {
+            return Ok(true);
+        }
+        // 1-ZST are compatible with all 1-ZST (and with nothing else).
+        if caller.is_1zst() || callee.is_1zst() {
+            return Ok(caller.is_1zst() && callee.is_1zst());
+        }
+        // Unfold newtypes and NPO optimizations.
+        let unfold = |layout: TyAndLayout<'tcx>| {
+            self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
+        };
+        let caller = unfold(caller)?;
+        let callee = unfold(callee)?;
+        // Now see if these inner types are compatible.
+
+        // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
+        // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
+        // must be compatible.
+        // So we just accept everything with Pointer ABI as compatible,
+        // even if this will accept some code that is not stably guaranteed to work.
+        // This also handles function pointers.
+        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
+            abi::Abi::Scalar(s) => match s.primitive() {
+                abi::Primitive::Pointer(addr_space) => Some(addr_space),
+                _ => None,
+            },
+            _ => None,
+        };
+        if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
+            return Ok(caller == callee);
+        }
+        // For wide pointers we have to get the pointee type.
+        let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
+            // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
+            Ok(Some(match ty.kind() {
+                ty::Ref(_, ty, _) => *ty,
+                ty::RawPtr(ty, _) => *ty,
+                // We only accept `Box` with the default allocator.
+                _ if ty.is_box_global(*self.tcx) => ty.boxed_ty(),
+                _ => return Ok(None),
+            }))
+        };
+        if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
+            // This is okay if they have the same metadata type.
+            let meta_ty = |ty: Ty<'tcx>| {
+                // Even if `ty` is normalized, the search for the unsized tail will project
+                // to fields, which can yield non-normalized types. So we need to provide a
+                // normalization function.
+                let normalize = |ty| self.tcx.normalize_erasing_regions(self.param_env, ty);
+                ty.ptr_metadata_ty(*self.tcx, normalize)
+            };
+            return Ok(meta_ty(caller) == meta_ty(callee));
+        }
+
+        // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
+        // `char` counts as `u32`.
+        let int_ty = |ty: Ty<'tcx>| {
+            Some(match ty.kind() {
+                ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
+                ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
+                ty::Char => (Integer::I32, /* signed */ false),
+                _ => return None,
+            })
+        };
+        if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
+            // This is okay if they are the same integer type.
+            return Ok(caller == callee);
+        }
+
+        // Fall back to exact equality.
+        // FIXME: We are missing the rules for "repr(C) wrapping compatible types".
+        Ok(caller == callee)
+    }
+
+    fn check_argument_compat(
+        &self,
+        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+    ) -> InterpResult<'tcx, bool> {
+        // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
+        // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
+        if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
+            // Ensure that our checks imply actual ABI compatibility for this concrete call.
+            assert!(caller_abi.eq_abi(callee_abi));
+            return Ok(true);
+        } else {
+            trace!(
+                "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+                caller_abi, callee_abi
+            );
+            return Ok(false);
+        }
+    }
+
+    /// Initialize a single callee argument, checking the types for compatibility.
+    fn pass_argument<'x, 'y>(
+        &mut self,
+        caller_args: &mut impl Iterator<
+            Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
+        >,
+        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        callee_arg: &mir::Place<'tcx>,
+        callee_ty: Ty<'tcx>,
+        already_live: bool,
+    ) -> InterpResult<'tcx>
+    where
+        'tcx: 'x,
+        'tcx: 'y,
+    {
+        assert_eq!(callee_ty, callee_abi.layout.ty);
+        if matches!(callee_abi.mode, PassMode::Ignore) {
+            // This one is skipped. Still must be made live though!
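+            // (Even ignored arguments are locals of the callee body, and all locals start
+            // out dead; a dead local cannot be accessed at all.)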
+            if !already_live {
+                self.storage_live(callee_arg.as_local().unwrap())?;
+            }
+            return Ok(());
+        }
+        // Find next caller arg.
+        let Some((caller_arg, caller_abi)) = caller_args.next() else {
+            throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
+        };
+        assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
+        // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
+        // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
+        // right type to print to the user.
+
+        // Check compatibility
+        if !self.check_argument_compat(caller_abi, callee_abi)? {
+            throw_ub!(AbiMismatchArgument {
+                caller_ty: caller_abi.layout.ty,
+                callee_ty: callee_abi.layout.ty
+            });
+        }
+        // We work with a copy of the argument for now; if this is in-place argument passing, we
+        // will later protect the source it comes from. This means the callee cannot observe if we
+        // did in-place or by-copy argument passing, except for pointer equality tests.
+        let caller_arg_copy = self.copy_fn_arg(caller_arg);
+        if !already_live {
+            let local = callee_arg.as_local().unwrap();
+            let meta = caller_arg_copy.meta();
+            // `check_argument_compat` ensures that if metadata is needed, both have the same type,
+            // so we know they will use the metadata the same way.
+            assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);
+
+            self.storage_live_dyn(local, meta)?;
+        }
+        // Now we can finally actually evaluate the callee place.
+        let callee_arg = self.eval_place(*callee_arg)?;
+        // We allow some transmutes here.
+        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
+        // is true for all `copy_op`, but there are a lot of special cases for argument passing
+        // specifically.)
+        self.copy_op_allow_transmute(&caller_arg_copy, &callee_arg)?;
+        // If this was an in-place pass, protect the place it comes from for the duration of the call.
+        if let FnArg::InPlace(mplace) = caller_arg {
+            M::protect_in_place_function_argument(self, mplace)?;
+        }
+        Ok(())
+    }
+
+    fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
+        // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
+        let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+        if !self.tcx.sess.target.is_like_wasm
+            && attrs
+                .target_features
+                .iter()
+                .any(|feature| !self.tcx.sess.target_features.contains(feature))
+        {
+            throw_ub_custom!(
+                fluent::const_eval_unavailable_target_features_for_fn,
+                unavailable_feats = attrs
+                    .target_features
+                    .iter()
+                    .filter(|&feature| !self.tcx.sess.target_features.contains(feature))
+                    .fold(String::new(), |mut s, feature| {
+                        if !s.is_empty() {
+                            s.push_str(", ");
+                        }
+                        s.push_str(feature.as_str());
+                        s
+                    }),
+            );
+        }
+        Ok(())
+    }
+
+    /// The main entry point for creating a new stack frame: performs ABI checks and initializes
+    /// arguments.
+    #[instrument(skip(self), level = "trace")]
+    pub fn init_stack_frame(
+        &mut self,
+        instance: Instance<'tcx>,
+        body: &'tcx mir::Body<'tcx>,
+        caller_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[FnArg<'tcx, M::Provenance>],
+        with_caller_location: bool,
+        destination: &MPlaceTy<'tcx, M::Provenance>,
+        mut stack_pop: StackPopCleanup,
+    ) -> InterpResult<'tcx> {
+        // Compute callee information.
+        // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
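+        // (Below we pass `ty::List::empty()`, i.e. no extra args; that is consistent with
+        // c-variadic calls being rejected right after.)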
+        let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+        if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
+            throw_unsup_format!("calling a c-variadic function is not supported");
+        }
+
+        if M::enforce_abi(self) {
+            if caller_fn_abi.conv != callee_fn_abi.conv {
+                throw_ub_custom!(
+                    fluent::const_eval_incompatible_calling_conventions,
+                    callee_conv = format!("{:?}", callee_fn_abi.conv),
+                    caller_conv = format!("{:?}", caller_fn_abi.conv),
+                )
+            }
+        }
+
+        // Check that all target features required by the callee (i.e., from
+        // the attribute `#[target_feature(enable = ...)]`) are enabled at
+        // compile time.
+        self.check_fn_target_features(instance)?;
+
+        if !callee_fn_abi.can_unwind {
+            // The callee cannot unwind, so force the `Unreachable` unwind handling.
+            match &mut stack_pop {
+                StackPopCleanup::Root { .. } => {}
+                StackPopCleanup::Goto { unwind, .. } => {
+                    *unwind = mir::UnwindAction::Unreachable;
+                }
+            }
+        }
+
+        self.push_stack_frame_raw(instance, body, destination, stack_pop)?;
+
+        // If an error is raised here, pop the frame again to get an accurate backtrace.
+        // To this end, we wrap it all in a `try` block.
+        let res: InterpResult<'tcx> = try {
+            trace!(
+                "caller ABI: {:#?}, args: {:#?}",
+                caller_fn_abi,
+                args.iter()
+                    .map(|arg| (
+                        arg.layout().ty,
+                        match arg {
+                            FnArg::Copy(op) => format!("copy({op:?})"),
+                            FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
+                        }
+                    ))
+                    .collect::<Vec<_>>()
+            );
+            trace!(
+                "spread_arg: {:?}, locals: {:#?}",
+                body.spread_arg,
+                body.args_iter()
+                    .map(|local| (
+                        local,
+                        self.layout_of_local(self.frame(), local, None).unwrap().ty,
+                    ))
+                    .collect::<Vec<_>>()
+            );
+
+            // In principle, we have two iterators: Where the arguments come from, and where
+            // they go to.
+
+            // The "where they come from" part is easy, we expect the caller to do any special handling
+            // that might be required here (e.g. for untupling).
+            // If `with_caller_location` is set we pretend there is an extra argument (that
+            // we will not pass).
+            assert_eq!(
+                args.len() + if with_caller_location { 1 } else { 0 },
+                caller_fn_abi.args.len(),
+                "mismatch between caller ABI and caller arguments",
+            );
+            let mut caller_args = args
+                .iter()
+                .zip(caller_fn_abi.args.iter())
+                .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
+
+            // Now we have to spread them out across the callee's locals,
+            // taking into account the `spread_arg`. If we could write
+            // this as a single iterator (that handles `spread_arg`), then
+            // `pass_argument` would be the loop body. It takes care to
+            // not advance `caller_iter` for ignored arguments.
+            let mut callee_args_abis = callee_fn_abi.args.iter();
+            for local in body.args_iter() {
+                // Construct the destination place for this argument. At this point all
+                // locals are still dead, so we cannot construct a `PlaceTy`.
+                let dest = mir::Place::from(local);
+                // `layout_of_local` does more than just the instantiation we need to get the
+                // type, but the result gets cached so this avoids calling the instantiation
+                // query *again* the next time this local is accessed.
+                let ty = self.layout_of_local(self.frame(), local, None)?.ty;
+                if Some(local) == body.spread_arg {
+                    // Make the local live once, then fill in the value field by field.
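+                    // (Illustrative: for a rust-call fn whose spread local has type `(A, B)`,
+                    // the tuple local goes live here and `.0`, `.1` are filled one by one below.)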
+                    self.storage_live(local)?;
+                    // Must be a tuple
+                    let ty::Tuple(fields) = ty.kind() else {
+                        span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
+                    };
+                    for (i, field_ty) in fields.iter().enumerate() {
+                        let dest = dest.project_deeper(
+                            &[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
+                            *self.tcx,
+                        );
+                        let callee_abi = callee_args_abis.next().unwrap();
+                        self.pass_argument(
+                            &mut caller_args,
+                            callee_abi,
+                            &dest,
+                            field_ty,
+                            /* already_live */ true,
+                        )?;
+                    }
+                } else {
+                    // Normal argument. Cannot mark it as live yet, it might be unsized!
+                    let callee_abi = callee_args_abis.next().unwrap();
+                    self.pass_argument(
+                        &mut caller_args,
+                        callee_abi,
+                        &dest,
+                        ty,
+                        /* already_live */ false,
+                    )?;
+                }
+            }
+            // If the callee needs a caller location, pretend we consume one more argument from the ABI.
+            if instance.def.requires_caller_location(*self.tcx) {
+                callee_args_abis.next().unwrap();
+            }
+            // Now we should have no more caller args or callee arg ABIs
+            assert!(
+                callee_args_abis.next().is_none(),
+                "mismatch between callee ABI and callee body arguments"
+            );
+            if caller_args.next().is_some() {
+                throw_ub_custom!(fluent::const_eval_too_many_caller_args);
+            }
+            // Don't forget to check the return type!
+            if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
+                throw_ub!(AbiMismatchReturn {
+                    caller_ty: caller_fn_abi.ret.layout.ty,
+                    callee_ty: callee_fn_abi.ret.layout.ty
+                });
+            }
+
+            // Protect return place for in-place return value passing.
+            M::protect_in_place_function_argument(self, &destination)?;
+
+            // Don't forget to mark "initially live" locals as live.
+            self.storage_live_for_always_live_locals()?;
+        };
+        match res {
+            Err(err) => {
+                // Don't show the incomplete stack frame in the error stacktrace.
+                self.stack_mut().pop();
+                Err(err)
+            }
+            Ok(()) => Ok(()),
+        }
+    }
+
+    /// Initiate a call to this function -- pushing the stack frame and initializing the arguments.
+    ///
+    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
+    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
+    ///
+    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
+    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
+    /// when those arguments are present.
+    pub(super) fn init_fn_call(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+        (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
+        args: &[FnArg<'tcx, M::Provenance>],
+        with_caller_location: bool,
+        destination: &MPlaceTy<'tcx, M::Provenance>,
+        target: Option<mir::BasicBlock>,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx> {
+        trace!("init_fn_call: {:#?}", fn_val);
+
+        let instance = match fn_val {
+            FnVal::Instance(instance) => instance,
+            FnVal::Other(extra) => {
+                return M::call_extra_fn(
+                    self,
+                    extra,
+                    caller_abi,
+                    args,
+                    destination,
+                    target,
+                    unwind,
+                );
+            }
+        };
+
+        match instance.def {
+            ty::InstanceKind::Intrinsic(def_id) => {
+                assert!(self.tcx.intrinsic(def_id).is_some());
+                // FIXME: Should `InPlace` arguments be reset to uninit?
+                if let Some(fallback) = M::call_intrinsic(
+                    self,
+                    instance,
+                    &self.copy_fn_args(args),
+                    destination,
+                    target,
+                    unwind,
+                )?
+                {
+                    assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden);
+                    assert!(matches!(fallback.def, ty::InstanceKind::Item(_)));
+                    return self.init_fn_call(
+                        FnVal::Instance(fallback),
+                        (caller_abi, caller_fn_abi),
+                        args,
+                        with_caller_location,
+                        destination,
+                        target,
+                        unwind,
+                    );
+                } else {
+                    Ok(())
+                }
+            }
+            ty::InstanceKind::VTableShim(..)
+            | ty::InstanceKind::ReifyShim(..)
+            | ty::InstanceKind::ClosureOnceShim { .. }
+            | ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
+            | ty::InstanceKind::CoroutineKindShim { .. }
+            | ty::InstanceKind::FnPtrShim(..)
+            | ty::InstanceKind::DropGlue(..)
+            | ty::InstanceKind::CloneShim(..)
+            | ty::InstanceKind::FnPtrAddrShim(..)
+            | ty::InstanceKind::ThreadLocalShim(..)
+            | ty::InstanceKind::AsyncDropGlueCtorShim(..)
+            | ty::InstanceKind::Item(_) => {
+                // We need MIR for this fn
+                let Some((body, instance)) = M::find_mir_or_eval_fn(
+                    self,
+                    instance,
+                    caller_abi,
+                    args,
+                    destination,
+                    target,
+                    unwind,
+                )?
+                else {
+                    return Ok(());
+                };
+
+                // Special handling for the closure ABI: untuple the last argument.
+                let args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
+                    if caller_abi == Abi::RustCall && !args.is_empty() {
+                        // Untuple
+                        let (untuple_arg, args) = args.split_last().unwrap();
+                        trace!("init_fn_call: Will pass last argument by untupling");
+                        Cow::from(
+                            args.iter()
+                                .map(|a| Ok(a.clone()))
+                                .chain(
+                                    (0..untuple_arg.layout().fields.count())
+                                        .map(|i| self.fn_arg_field(untuple_arg, i)),
+                                )
+                                .collect::<InterpResult<'_, Vec<_>>>()?,
+                        )
+                    } else {
+                        // Plain arg passing
+                        Cow::from(args)
+                    };
+
+                self.init_stack_frame(
+                    instance,
+                    body,
+                    caller_fn_abi,
+                    &args,
+                    with_caller_location,
+                    destination,
+                    StackPopCleanup::Goto { ret: target, unwind },
+                )
+            }
+            // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
+            // codegen'd / interpreted as virtual calls through the vtable.
+            ty::InstanceKind::Virtual(def_id, idx) => {
+                let mut args = args.to_vec();
+                // We have to implement all "object safe receivers". So we have to go search for a
+                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
+                // unwrap those newtypes until we are there.
+                // An `InPlace` does nothing here, we keep the original receiver intact. We can't
+                // really pass the argument in-place anyway, and we are constructing a new
+                // `Immediate` receiver.
+                let mut receiver = self.copy_fn_arg(&args[0]);
+                let receiver_place = loop {
+                    match receiver.layout.ty.kind() {
+                        ty::Ref(..) | ty::RawPtr(..) => {
+                            // We do *not* use `deref_pointer` here: we don't want to conceptually
+                            // create a place that must be dereferenceable, since the receiver might
+                            // be a raw pointer and (for `*const dyn Trait`) we don't need to
+                            // actually access memory to resolve this method.
+                            // Also see .
+                            let val = self.read_immediate(&receiver)?;
+                            break self.ref_to_mplace(&val)?;
+                        }
+                        ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
+                        ty::Dynamic(.., ty::DynStar) => {
+                            // Not clear how to handle this, so far we assume the receiver is always a pointer.
+                            span_bug!(
+                                self.cur_span(),
+                                "by-value calls on a `dyn*`... are those a thing?"
+                            );
+                        }
+                        _ => {
+                            // Not there yet, search for the only non-ZST field.
+                            // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
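+                            // (E.g. an `Rc<Self>` or `Pin<&mut Self>` receiver is peeled
+                            // field by field until a pointer or `dyn Trait` is reached.)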
+ let (idx, _) = receiver.layout.non_1zst_field(self).expect( + "not exactly one non-1-ZST field in a `DispatchFromDyn` type", + ); + receiver = self.project_field(&receiver, idx)?; + } + } + }; + + // Obtain the underlying trait we are working on, and the adjusted receiver argument. + let (trait_, dyn_ty, adjusted_recv) = if let ty::Dynamic(data, _, ty::DynStar) = + receiver_place.layout.ty.kind() + { + let recv = self.unpack_dyn_star(&receiver_place, data)?; + + (data.principal(), recv.layout.ty, recv.ptr()) + } else { + // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`. + // (For that reason we also cannot use `unpack_dyn_trait`.) + let receiver_tail = self + .tcx + .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env); + let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else { + span_bug!( + self.cur_span(), + "dynamic call on non-`dyn` type {}", + receiver_tail + ) + }; + assert!(receiver_place.layout.is_unsized()); + + // Get the required information from the vtable. + let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?; + let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?; + + // It might be surprising that we use a pointer as the receiver even if this + // is a by-val case; this works because by-val passing of an unsized `dyn + // Trait` to a function is actually desugared to a pointer. + (receiver_trait.principal(), dyn_ty, receiver_place.ptr()) + }; + + // Now determine the actual method to call. Usually we use the easy way of just + // looking up the method at index `idx`. + let vtable_entries = self.vtable_entries(trait_, dyn_ty); + let Some(ty::VtblEntry::Method(fn_inst)) = vtable_entries.get(idx).copied() else { + // FIXME(fee1-dead) these could be variants of the UB info enum instead of this + throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method); + }; + trace!("Virtual call dispatches to {fn_inst:#?}"); + // We can also do the lookup based on `def_id` and `dyn_ty`, and check that that + // produces the same result. + if cfg!(debug_assertions) { + let tcx = *self.tcx; + + let trait_def_id = tcx.trait_of_item(def_id).unwrap(); + let virtual_trait_ref = + ty::TraitRef::from_method(tcx, trait_def_id, instance.args); + let existential_trait_ref = + ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref); + let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty); + + let concrete_method = Instance::expect_resolve_for_vtable( + tcx, + self.param_env, + def_id, + instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args), + self.cur_span(), + ); + assert_eq!(fn_inst, concrete_method); + } + + // Adjust receiver argument. Layout can be any (thin) ptr. + let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty); + args[0] = FnArg::Copy( + ImmTy::from_immediate( + Scalar::from_maybe_pointer(adjusted_recv, self).into(), + self.layout_of(receiver_ty)?, + ) + .into(), + ); + trace!("Patched receiver operand to {:#?}", args[0]); + // Need to also adjust the type in the ABI. Strangely, the layout there is actually + // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr` + // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus + // type, so we just patch this up locally. 
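+                // (Only the type of `args[0]` is overwritten below; the layout part is kept
+                // exactly as `fn_abi_new_uncached` computed it.)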
+                let mut caller_fn_abi = caller_fn_abi.clone();
+                caller_fn_abi.args[0].layout.ty = receiver_ty;
+
+                // recurse with concrete function
+                self.init_fn_call(
+                    FnVal::Instance(fn_inst),
+                    (caller_abi, &caller_fn_abi),
+                    &args,
+                    with_caller_location,
+                    destination,
+                    target,
+                    unwind,
+                )
+            }
+        }
+    }
+
+    /// Initiate a tail call to this function -- popping the current stack frame, pushing the new
+    /// stack frame and initializing the arguments.
+    pub(super) fn init_fn_tail_call(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+        (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
+        args: &[FnArg<'tcx, M::Provenance>],
+        with_caller_location: bool,
+    ) -> InterpResult<'tcx> {
+        trace!("init_fn_tail_call: {:#?}", fn_val);
+
+        // This is the "canonical" implementation of tail calls,
+        // a pop of the current stack frame, followed by a normal call
+        // which pushes a new stack frame, with the return address from
+        // the popped stack frame.
+        //
+        // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
+        // as the latter "executes" the goto to the return block, but we don't want to,
+        // only the tail called function should return to the current return block.
+        M::before_stack_pop(self, self.frame())?;
+
+        let StackPopInfo { return_action, return_to_block, return_place } =
+            self.pop_stack_frame_raw(false)?;
+
+        assert_eq!(return_action, ReturnAction::Normal);
+
+        // Take the "stack pop cleanup" info, and use that to initiate the next call.
+        let StackPopCleanup::Goto { ret, unwind } = return_to_block else {
+            bug!("can't tailcall as root");
+        };
+
+        // FIXME(explicit_tail_calls):
+        // we should check if both caller&callee can/n't unwind,
+        // see
+
+        self.init_fn_call(
+            fn_val,
+            (caller_abi, caller_fn_abi),
+            args,
+            with_caller_location,
+            &return_place,
+            ret,
+            unwind,
+        )
+    }
+
+    pub(super) fn init_drop_in_place_call(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+        instance: ty::Instance<'tcx>,
+        target: mir::BasicBlock,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx> {
+        trace!("init_drop_in_place_call: {:?},\n instance={:?}", place, instance);
+        // We take the address of the object. This may well be unaligned, which is fine
+        // for us here. However, unaligned accesses will probably make the actual drop
+        // implementation fail -- a problem shared by rustc.
+        let place = self.force_allocation(place)?;
+
+        // We behave a bit different from codegen here.
+        // Codegen creates an `InstanceKind::Virtual` with index 0 (the slot of the drop method) and
+        // then dispatches that to the normal call machinery. However, our call machinery currently
+        // only supports calling `VtblEntry::Method`; it would choke on a `MetadataDropInPlace`. So
+        // instead we do the virtual call stuff ourselves. It's easier here than in `init_fn_call`
+        // since we can just get a place of the underlying type and use `mplace_to_ref`.
+        let place = match place.layout.ty.kind() {
+            ty::Dynamic(data, _, ty::Dyn) => {
+                // Dropping a trait object. Need to find actual drop fn.
+                self.unpack_dyn_trait(&place, data)?
+            }
+            ty::Dynamic(data, _, ty::DynStar) => {
+                // Dropping a `dyn*`. Need to find actual drop fn.
+                self.unpack_dyn_star(&place, data)?
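+                // (This projects to the value stored inside the `dyn*`, so the
+                // `resolve_drop_in_place` below sees the concrete dynamic type.)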
+ } + _ => { + debug_assert_eq!( + instance, + ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty) + ); + place + } + }; + let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); + let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; + + let arg = self.mplace_to_ref(&place)?; + let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?); + + self.init_fn_call( + FnVal::Instance(instance), + (Abi::Rust, fn_abi), + &[FnArg::Copy(arg.into())], + false, + &ret.into(), + Some(target), + unwind, + ) + } + + /// Pops the current frame from the stack, copies the return value to the caller, deallocates + /// the memory for allocated locals, and jumps to an appropriate place. + /// + /// If `unwinding` is `false`, then we are performing a normal return + /// from a function. In this case, we jump back into the frame of the caller, + /// and continue execution as normal. + /// + /// If `unwinding` is `true`, then we are in the middle of a panic, + /// and need to unwind this frame. In this case, we jump to the + /// `cleanup` block for the function, which is responsible for running + /// `Drop` impls for any locals that have been initialized at this point. + /// The cleanup block ends with a special `Resume` terminator, which will + /// cause us to continue unwinding. + #[instrument(skip(self), level = "trace")] + pub(super) fn return_from_current_stack_frame( + &mut self, + unwinding: bool, + ) -> InterpResult<'tcx> { + info!( + "popping stack frame ({})", + if unwinding { "during unwinding" } else { "returning from function" } + ); + + // Check `unwinding`. + assert_eq!( + unwinding, + match self.frame().loc { + Left(loc) => self.body().basic_blocks[loc.block].is_cleanup, + Right(_) => true, + } + ); + if unwinding && self.frame_idx() == 0 { + throw_ub_custom!(fluent::const_eval_unwind_past_top); + } + + M::before_stack_pop(self, self.frame())?; + + // Copy return value. Must of course happen *before* we deallocate the locals. + // Must be *after* `before_stack_pop` as otherwise the return place might still be protected. + let copy_ret_result = if !unwinding { + let op = self + .local_to_op(mir::RETURN_PLACE, None) + .expect("return place should always be live"); + let dest = self.frame().return_place.clone(); + let err = if self.stack().len() == 1 { + // The initializer of constants and statics will get validated separately + // after the constant has been fully evaluated. While we could fall back to the default + // code path, that will cause -Zenforce-validity to cycle on static initializers. + // Reading from a static's memory is not allowed during its evaluation, and will always + // trigger a cycle error. Validation must read from the memory of the current item. + // For Miri this means we do not validate the root frame return value, + // but Miri anyway calls `read_target_isize` on that so separate validation + // is not needed. + self.copy_op_no_dest_validation(&op, &dest) + } else { + self.copy_op_allow_transmute(&op, &dest) + }; + trace!("return value: {:?}", self.dump_place(&dest.into())); + // We delay actually short-circuiting on this error until *after* the stack frame is + // popped, since we want this error to be attributed to the caller, whose type defines + // this transmute. + err + } else { + Ok(()) + }; + + // All right, now it is time to actually pop the frame. + let stack_pop_info = self.pop_stack_frame_raw(unwinding)?; + + // Report error from return value copy, if any. 
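+        // (Deferred from above so the error is attributed to the caller's frame, not the callee's.)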
+ copy_ret_result?; + + match stack_pop_info.return_action { + ReturnAction::Normal => {} + ReturnAction::NoJump => { + // The hook already did everything. + return Ok(()); + } + ReturnAction::NoCleanup => { + // If we are not doing cleanup, also skip everything else. + assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked"); + assert!(!unwinding, "tried to skip cleanup during unwinding"); + // Skip machine hook. + return Ok(()); + } + } + + // Normal return, figure out where to jump. + if unwinding { + // Follow the unwind edge. + let unwind = match stack_pop_info.return_to_block { + StackPopCleanup::Goto { unwind, .. } => unwind, + StackPopCleanup::Root { .. } => { + panic!("encountered StackPopCleanup::Root when unwinding!") + } + }; + // This must be the very last thing that happens, since it can in fact push a new stack frame. + self.unwind_to_block(unwind) + } else { + // Follow the normal return edge. + match stack_pop_info.return_to_block { + StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret), + StackPopCleanup::Root { .. } => { + assert!( + self.stack().is_empty(), + "only the topmost frame can have StackPopCleanup::Root" + ); + Ok(()) + } + } + } + } +} diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 18d585e4a7a..7a6bbdfdcb5 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -1,40 +1,29 @@ -use std::cell::Cell; -use std::{fmt, mem}; - -use either::{Either, Left, Right}; +use either::{Left, Right}; use rustc_errors::DiagCtxtHandle; use rustc_hir::def_id::DefId; -use rustc_hir::definitions::DefPathData; -use rustc_hir::{self as hir}; -use rustc_index::IndexVec; use rustc_infer::infer::at::ToTrace; use rustc_infer::infer::TyCtxtInferExt; use rustc_infer::traits::ObligationCause; -use rustc_middle::mir::interpret::{ - CtfeProvenance, ErrorHandled, InvalidMetaKind, ReportedErrorInfo, -}; +use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo}; use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::layout::{ - self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers, - TyAndLayout, + self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, }; use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance}; -use rustc_middle::{bug, mir, span_bug}; -use rustc_mir_dataflow::storage::always_storage_live_locals; +use rustc_middle::{mir, span_bug}; use rustc_session::Limit; use rustc_span::Span; use rustc_target::abi::call::FnAbi; use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout}; use rustc_trait_selection::traits::ObligationCtxt; -use tracing::{debug, info, info_span, instrument, trace}; +use tracing::{debug, trace}; use super::{ - err_inval, throw_inval, throw_ub, throw_ub_custom, throw_unsup, GlobalId, Immediate, - InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind, - OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable, Provenance, - ReturnAction, Scalar, + err_inval, throw_inval, throw_ub, throw_ub_custom, Frame, FrameInfo, GlobalId, InterpErrorInfo, + InterpResult, MPlaceTy, Machine, MemPlaceMeta, Memory, OpTy, Place, PlaceTy, PointerArithmetic, + Projectable, Provenance, }; -use crate::{errors, fluent_generated as fluent, util, ReportErrorExt}; +use crate::{fluent_generated as fluent, 
util, ReportErrorExt};
 
 pub struct InterpCx<'tcx, M: Machine<'tcx>> {
     /// Stores the `Machine` instance.
@@ -57,314 +46,6 @@ pub struct InterpCx<'tcx, M: Machine<'tcx>> {
     pub recursion_limit: Limit,
 }
 
-// The PhantomData exists to prevent this type from being `Send`. If it were sent across a thread
-// boundary and dropped in the other thread, it would exit the span in the other thread.
-struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
-
-impl SpanGuard {
-    /// By default a `SpanGuard` does nothing.
-    fn new() -> Self {
-        Self(tracing::Span::none(), std::marker::PhantomData)
-    }
-
-    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
-    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
-    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
-    /// been pushed.
-    fn enter(&mut self, span: tracing::Span) {
-        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
-        // we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
-        // can't protect the tracing stack, but that'll just lead to weird logging, no actual
-        // problems.
-        *self = Self(span, std::marker::PhantomData);
-        self.0.with_subscriber(|(id, dispatch)| {
-            dispatch.enter(id);
-        });
-    }
-}
-
-impl Drop for SpanGuard {
-    fn drop(&mut self) {
-        self.0.with_subscriber(|(id, dispatch)| {
-            dispatch.exit(id);
-        });
-    }
-}
-
-/// A stack frame.
-pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
-    ////////////////////////////////////////////////////////////////////////////////
-    // Function and callsite information
-    ////////////////////////////////////////////////////////////////////////////////
-    /// The MIR for the function called on this frame.
-    pub body: &'tcx mir::Body<'tcx>,
-
-    /// The def_id and args of the current function.
-    pub instance: ty::Instance<'tcx>,
-
-    /// Extra data for the machine.
-    pub extra: Extra,
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Return place and locals
-    ////////////////////////////////////////////////////////////////////////////////
-    /// Work to perform when returning from this function.
-    pub return_to_block: StackPopCleanup,
-
-    /// The location where the result of the current stack frame should be written to,
-    /// and its layout in the caller.
-    pub return_place: MPlaceTy<'tcx, Prov>,
-
-    /// The list of locals for this stack frame, stored in order as
-    /// `[return_ptr, arguments..., variables..., temporaries...]`.
-    /// The locals are stored as `Option<Value>`s.
-    /// `None` represents a local that is currently dead, while a live local
-    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
-    ///
-    /// Do *not* access this directly; always go through the machine hook!
-    pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
-
-    /// The span of the `tracing` crate is stored here.
-    /// When the guard is dropped, the span is exited. This gives us
-    /// a full stack trace on all tracing statements.
-    tracing_span: SpanGuard,
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Current position within the function
-    ////////////////////////////////////////////////////////////////////////////////
-    /// If this is `Right`, we are not currently executing any particular statement in
-    /// this frame (can happen e.g. during frame initialization, and during unwinding on
-    /// frames without cleanup code).
-    ///
-    /// Needs to be public because ConstProp does unspeakable things to it.
-    pub loc: Either<mir::Location, Span>,
-}
-
-/// What we store about a frame in an interpreter backtrace.
-#[derive(Clone, Debug)]
-pub struct FrameInfo<'tcx> {
-    pub instance: ty::Instance<'tcx>,
-    pub span: Span,
-}
-
-#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
-pub enum StackPopCleanup {
-    /// Jump to the next block in the caller, or cause UB if None (that's a function
-    /// that may never return). Also store layout of return place so
-    /// we can validate it at that layout.
-    /// `ret` stores the block we jump to on a normal return, while `unwind`
-    /// stores the block used for cleanup during unwinding.
-    Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
-    /// The root frame of the stack: nowhere else to jump to.
-    /// `cleanup` says whether locals are deallocated. Static computation
-    /// wants them leaked to intern what they need (and just throw away
-    /// the entire `ecx` when it is done).
-    Root { cleanup: bool },
-}
-
-/// Return type of [`InterpCx::pop_stack_frame`].
-pub struct StackPopInfo<'tcx, Prov: Provenance> {
-    /// Additional information about the action to be performed when returning from the popped
-    /// stack frame.
-    pub return_action: ReturnAction,
-
-    /// [`return_to_block`](Frame::return_to_block) of the popped stack frame.
-    pub return_to_block: StackPopCleanup,
-
-    /// [`return_place`](Frame::return_place) of the popped stack frame.
-    pub return_place: MPlaceTy<'tcx, Prov>,
-}
-
-/// State of a local variable including a memoized layout
-#[derive(Clone)]
-pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
-    value: LocalValue<Prov>,
-    /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
-    /// Avoids computing the layout of locals that are never actually initialized.
-    layout: Cell<Option<TyAndLayout<'tcx>>>,
-}
-
-impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("LocalState")
-            .field("value", &self.value)
-            .field("ty", &self.layout.get().map(|l| l.ty))
-            .finish()
-    }
-}
-
-/// Current value of a local variable
-///
-/// This does not store the type of the local; the type is given by `body.local_decls` and can never
-/// change, so by not storing here we avoid having to maintain that as an invariant.
-#[derive(Copy, Clone, Debug)] // Miri debug-prints these
-pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
-    /// This local is not currently alive, and cannot be used at all.
-    Dead,
-    /// A normal, live local.
-    /// Mostly for convenience, we re-use the `Operand` type here.
-    /// This is an optimization over just always having a pointer here;
-    /// we can thus avoid doing an allocation when the local just stores
-    /// immediate values *and* never has its address taken.
-    Live(Operand<Prov>),
-}
-
-impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
-    pub fn make_live_uninit(&mut self) {
-        self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
-    }
-
-    /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
-    /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
-    /// private.
-    pub fn as_mplace_or_imm(
-        &self,
-    ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
-        match self.value {
-            LocalValue::Dead => None,
-            LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
-            LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
-        }
-    }
-
-    /// Read the local's value or error if the local is not yet live or not live anymore.
-    #[inline(always)]
-    pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
-        match &self.value {
-            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
-            LocalValue::Live(val) => Ok(val),
-        }
-    }
-
-    /// Overwrite the local. If the local can be overwritten in place, return a reference
-    /// to do so; otherwise return the `MemPlace` to consult instead.
-    #[inline(always)]
-    pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
-        match &mut self.value {
-            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
-            LocalValue::Live(val) => Ok(val),
-        }
-    }
-}
-
-impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
-    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> {
-        Frame {
-            body: self.body,
-            instance: self.instance,
-            return_to_block: self.return_to_block,
-            return_place: self.return_place,
-            locals: self.locals,
-            loc: self.loc,
-            extra,
-            tracing_span: self.tracing_span,
-        }
-    }
-}
-
-impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
-    /// Get the current location within the Frame.
-    ///
-    /// If this is `Right`, we are not currently executing any particular statement in
-    /// this frame (can happen e.g. during frame initialization, and during unwinding on
-    /// frames without cleanup code).
-    ///
-    /// Used by priroda.
-    pub fn current_loc(&self) -> Either<mir::Location, Span> {
-        self.loc
-    }
-
-    /// Return the `SourceInfo` of the current instruction.
-    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
-        self.loc.left().map(|loc| self.body.source_info(loc))
-    }
-
-    pub fn current_span(&self) -> Span {
-        match self.loc {
-            Left(loc) => self.body.source_info(loc).span,
-            Right(span) => span,
-        }
-    }
-
-    pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option<hir::HirId> {
-        // We first try to get a HirId via the current source scope,
-        // and fall back to `body.source`.
-        self.current_source_info()
-            .and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data {
-                mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
-                mir::ClearCrossCrate::Clear => None,
-            })
-            .or_else(|| {
-                let def_id = self.body.source.def_id().as_local();
-                def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id))
-            })
-    }
-
-    /// Returns the address of the buffer where the locals are stored. This is used by `Place` as a
-    /// sanity check to detect bugs where we mix up which stack frame a place refers to.
-    #[inline(always)]
-    pub(super) fn locals_addr(&self) -> usize {
-        self.locals.raw.as_ptr().addr()
-    }
-
-    #[must_use]
-    pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> {
-        let mut frames = Vec::new();
-        // This deliberately does *not* honor `requires_caller_location` since it is used for much
-        // more than just panics.
-        for frame in stack.iter().rev() {
-            let span = match frame.loc {
-                Left(loc) => {
-                    // If the stacktrace passes through MIR-inlined source scopes, add them.
-                    let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc);
-                    let mut scope_data = &frame.body.source_scopes[scope];
-                    while let Some((instance, call_span)) = scope_data.inlined {
-                        frames.push(FrameInfo { span, instance });
-                        span = call_span;
-                        scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()];
-                    }
-                    span
-                }
-                Right(span) => span,
-            };
-            frames.push(FrameInfo { span, instance: frame.instance });
-        }
-        trace!("generate stacktrace: {:#?}", frames);
-        frames
-    }
-}
-
-// FIXME: only used by miri, should be removed once translatable.
-impl<'tcx> fmt::Display for FrameInfo<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        ty::tls::with(|tcx| {
-            if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
-                write!(f, "inside closure")
-            } else {
-                // Note: this triggers a `must_produce_diag` state, which means that if we ever
-                // get here we must emit a diagnostic. We should never display a `FrameInfo` unless
-                // we actually want to emit a warning or error to the user.
-                write!(f, "inside `{}`", self.instance)
-            }
-        })
-    }
-}
-
-impl<'tcx> FrameInfo<'tcx> {
-    pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
-        let span = self.span;
-        if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
-            errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
-        } else {
-            let instance = format!("{}", self.instance);
-            // Note: this triggers a `must_produce_diag` state, which means that if we ever get
-            // here we must emit a diagnostic. We should never display a `FrameInfo` unless we
-            // actually want to emit a warning or error to the user.
-            errors::FrameNote { where_: "instance", span, instance, times: 0 }
-        }
-    }
-}
-
 impl<'tcx, M: Machine<'tcx>> HasDataLayout for InterpCx<'tcx, M> {
     #[inline]
     fn data_layout(&self) -> &TargetDataLayout {
@@ -703,30 +384,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
     }
 
-    #[inline(always)]
-    pub(super) fn layout_of_local(
-        &self,
-        frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
-        local: mir::Local,
-        layout: Option<TyAndLayout<'tcx>>,
-    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
-        let state = &frame.locals[local];
-        if let Some(layout) = state.layout.get() {
-            return Ok(layout);
-        }
-
-        let layout = from_known_layout(self.tcx, self.param_env, layout, || {
-            let local_ty = frame.body.local_decls[local].ty;
-            let local_ty =
-                self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
-            self.layout_of(local_ty)
-        })?;
-
-        // Layouts of locals are requested a lot, so we cache them.
-        state.layout.set(Some(layout));
-        Ok(layout)
-    }
-
     /// Returns the actual dynamic size and alignment of the place at the given type.
     /// Only the "meta" (metadata) part of the place matters.
     /// This can fail to provide an answer for extern types.
@@ -825,132 +482,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { self.size_and_align_of(&mplace.meta(), &mplace.layout) } - #[instrument(skip(self, body, return_place, return_to_block), level = "debug")] - pub fn push_stack_frame( - &mut self, - instance: ty::Instance<'tcx>, - body: &'tcx mir::Body<'tcx>, - return_place: &MPlaceTy<'tcx, M::Provenance>, - return_to_block: StackPopCleanup, - ) -> InterpResult<'tcx> { - trace!("body: {:#?}", body); - - // First push a stack frame so we have access to the local args - self.push_new_stack_frame(instance, body, return_to_block, return_place.clone())?; - - self.after_stack_frame_push(instance, body)?; - - Ok(()) - } - - /// Creates a new stack frame, initializes it and pushes it onto the stack. - /// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame). - fn push_new_stack_frame( - &mut self, - instance: ty::Instance<'tcx>, - body: &'tcx mir::Body<'tcx>, - return_to_block: StackPopCleanup, - return_place: MPlaceTy<'tcx, M::Provenance>, - ) -> InterpResult<'tcx> { - let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) }; - let locals = IndexVec::from_elem(dead_local, &body.local_decls); - let pre_frame = Frame { - body, - loc: Right(body.span), // Span used for errors caused during preamble. - return_to_block, - return_place, - locals, - instance, - tracing_span: SpanGuard::new(), - extra: (), - }; - let frame = M::init_frame(self, pre_frame)?; - self.stack_mut().push(frame); - - Ok(()) - } - - /// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame). - fn after_stack_frame_push( - &mut self, - instance: ty::Instance<'tcx>, - body: &'tcx mir::Body<'tcx>, - ) -> InterpResult<'tcx> { - // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check). - for &const_ in body.required_consts() { - let c = - self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?; - c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| { - err.emit_note(*self.tcx); - err - })?; - } - - // done - M::after_stack_push(self)?; - self.frame_mut().loc = Left(mir::Location::START); - - let span = info_span!("frame", "{}", instance); - self.frame_mut().tracing_span.enter(span); - - Ok(()) - } - - /// Pops a stack frame from the stack and returns some information about it. - /// - /// This also deallocates locals, if necessary. - /// - /// [`M::before_stack_pop`] should be called before calling this function. - /// [`M::after_stack_pop`] is called by this function automatically. - /// - /// [`M::before_stack_pop`]: Machine::before_stack_pop - /// [`M::after_stack_pop`]: Machine::after_stack_pop - pub fn pop_stack_frame( - &mut self, - unwinding: bool, - ) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> { - let cleanup = self.cleanup_current_frame_locals()?; - - let frame = - self.stack_mut().pop().expect("tried to pop a stack frame, but there were none"); - - let return_to_block = frame.return_to_block; - let return_place = frame.return_place.clone(); - - let return_action; - if cleanup { - return_action = M::after_stack_pop(self, frame, unwinding)?; - assert_ne!(return_action, ReturnAction::NoCleanup); - } else { - return_action = ReturnAction::NoCleanup; - }; - - Ok(StackPopInfo { return_action, return_to_block, return_place }) - } - - /// A private helper for [`pop_stack_frame`](InterpCx::pop_stack_frame). - /// Returns `true` if cleanup has been done, `false` otherwise. 
- fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> { - // Cleanup: deallocate locals. - // Usually we want to clean up (deallocate locals), but in a few rare cases we don't. - // We do this while the frame is still on the stack, so errors point to the callee. - let return_to_block = self.frame().return_to_block; - let cleanup = match return_to_block { - StackPopCleanup::Goto { .. } => true, - StackPopCleanup::Root { cleanup, .. } => cleanup, - }; - - if cleanup { - // We need to take the locals out, since we need to mutate while iterating. - let locals = mem::take(&mut self.frame_mut().locals); - for local in &locals { - self.deallocate_local(local.value)?; - } - } - - Ok(cleanup) - } - /// Jump to the given block. #[inline] pub fn go_to_block(&mut self, target: mir::BasicBlock) { @@ -997,248 +528,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { Ok(()) } - /// Pops the current frame from the stack, deallocating the - /// memory for allocated locals, and jumps to an appropriate place. - /// - /// If `unwinding` is `false`, then we are performing a normal return - /// from a function. In this case, we jump back into the frame of the caller, - /// and continue execution as normal. - /// - /// If `unwinding` is `true`, then we are in the middle of a panic, - /// and need to unwind this frame. In this case, we jump to the - /// `cleanup` block for the function, which is responsible for running - /// `Drop` impls for any locals that have been initialized at this point. - /// The cleanup block ends with a special `Resume` terminator, which will - /// cause us to continue unwinding. - #[instrument(skip(self), level = "debug")] - pub(super) fn return_from_current_stack_frame( - &mut self, - unwinding: bool, - ) -> InterpResult<'tcx> { - info!( - "popping stack frame ({})", - if unwinding { "during unwinding" } else { "returning from function" } - ); - - // Check `unwinding`. - assert_eq!( - unwinding, - match self.frame().loc { - Left(loc) => self.body().basic_blocks[loc.block].is_cleanup, - Right(_) => true, - } - ); - if unwinding && self.frame_idx() == 0 { - throw_ub_custom!(fluent::const_eval_unwind_past_top); - } - - M::before_stack_pop(self, self.frame())?; - - // Copy return value. Must of course happen *before* we deallocate the locals. - let copy_ret_result = if !unwinding { - let op = self - .local_to_op(mir::RETURN_PLACE, None) - .expect("return place should always be live"); - let dest = self.frame().return_place.clone(); - let err = if self.stack().len() == 1 { - // The initializer of constants and statics will get validated separately - // after the constant has been fully evaluated. While we could fall back to the default - // code path, that will cause -Zenforce-validity to cycle on static initializers. - // Reading from a static's memory is not allowed during its evaluation, and will always - // trigger a cycle error. Validation must read from the memory of the current item. - // For Miri this means we do not validate the root frame return value, - // but Miri anyway calls `read_target_isize` on that so separate validation - // is not needed. - self.copy_op_no_dest_validation(&op, &dest) - } else { - self.copy_op_allow_transmute(&op, &dest) - }; - trace!("return value: {:?}", self.dump_place(&dest.into())); - // We delay actually short-circuiting on this error until *after* the stack frame is - // popped, since we want this error to be attributed to the caller, whose type defines - // this transmute. 
- err
- } else {
- Ok(())
- };
-
- // All right, now it is time to actually pop the frame.
- let stack_pop_info = self.pop_stack_frame(unwinding)?;
-
- // Report error from return value copy, if any.
- copy_ret_result?;
-
- match stack_pop_info.return_action {
- ReturnAction::Normal => {}
- ReturnAction::NoJump => {
- // The hook already did everything.
- return Ok(());
- }
- ReturnAction::NoCleanup => {
- // If we are not doing cleanup, also skip everything else.
- assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
- assert!(!unwinding, "tried to skip cleanup during unwinding");
- // Skip machine hook.
- return Ok(());
- }
- }
-
- // Normal return, figure out where to jump.
- if unwinding {
- // Follow the unwind edge.
- let unwind = match stack_pop_info.return_to_block {
- StackPopCleanup::Goto { unwind, .. } => unwind,
- StackPopCleanup::Root { .. } => {
- panic!("encountered StackPopCleanup::Root when unwinding!")
- }
- };
- // This must be the very last thing that happens, since it can in fact push a new stack frame.
- self.unwind_to_block(unwind)
- } else {
- // Follow the normal return edge.
- match stack_pop_info.return_to_block {
- StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
- StackPopCleanup::Root { .. } => {
- assert!(
- self.stack().is_empty(),
- "only the topmost frame can have StackPopCleanup::Root"
- );
- Ok(())
- }
- }
- }
- }
-
- /// In the current stack frame, mark all locals as live that are not arguments and don't have
- /// `Storage*` annotations (this includes the return place).
- pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
- self.storage_live(mir::RETURN_PLACE)?;
-
- let body = self.body();
- let always_live = always_storage_live_locals(body);
- for local in body.vars_and_temps_iter() {
- if always_live.contains(local) {
- self.storage_live(local)?;
- }
- }
- Ok(())
- }
-
- pub fn storage_live_dyn(
- &mut self,
- local: mir::Local,
- meta: MemPlaceMeta<M::Provenance>,
- ) -> InterpResult<'tcx> {
- trace!("{:?} is now live", local);
-
- // We avoid `ty.is_trivially_sized` since that does something expensive for ADTs.
- fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
- match ty.kind() {
- ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
- | ty::Uint(_)
- | ty::Int(_)
- | ty::Bool
- | ty::Float(_)
- | ty::FnDef(..)
- | ty::FnPtr(_)
- | ty::RawPtr(..)
- | ty::Char
- | ty::Ref(..)
- | ty::Coroutine(..)
- | ty::CoroutineWitness(..)
- | ty::Array(..)
- | ty::Closure(..)
- | ty::CoroutineClosure(..)
- | ty::Never
- | ty::Error(_)
- | ty::Dynamic(_, _, ty::DynStar) => true,
-
- ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false,
-
- ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)),
-
- ty::Pat(ty, ..) => is_very_trivially_sized(*ty),
-
- // We don't want to do any queries, so there is not much we can do with ADTs.
- ty::Adt(..) => false,
-
- ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
-
- ty::Infer(ty::TyVar(_)) => false,
-
- ty::Bound(..)
- | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
- bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
- }
- }
- }
-
- // This is a hot function, we avoid computing the layout when possible.
- // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
- let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
- None
- } else {
- // We need the layout.
- let layout = self.layout_of_local(self.frame(), local, None)?;
- if layout.is_sized() { None } else { Some(layout) }
- };
-
- let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
- if !meta.has_meta() {
- throw_unsup!(UnsizedLocal);
- }
- // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
- let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
- Operand::Indirect(*dest_place.mplace())
- } else {
- assert!(!meta.has_meta()); // we're dropping the metadata
- // Just make this an efficient immediate.
- // Note that not calling `layout_of` here does have one real consequence:
- // if the type is too big, we'll only notice this when the local is actually initialized,
- // which is a bit too late -- we should ideally notice this already here, when the memory
- // is conceptually allocated. But given how rare that error is and that this is a hot function,
- // we accept this downside for now.
- Operand::Immediate(Immediate::Uninit)
- });
-
- // If the local is already live, deallocate its old memory.
- let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
- self.deallocate_local(old)?;
- Ok(())
- }
-
- /// Mark a storage as live, killing the previous content.
- #[inline(always)]
- pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
- self.storage_live_dyn(local, MemPlaceMeta::None)
- }
-
- pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
- assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
- trace!("{:?} is now dead", local);
-
- // If the local is already dead, this is a NOP.
- let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
- self.deallocate_local(old)?;
- Ok(())
- }
-
- #[instrument(skip(self), level = "debug")]
- fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
- if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
- // All locals have a backing allocation, even if the allocation is empty
- // due to the local having ZST type. Hence we can `unwrap`.
- trace!(
- "deallocating local {:?}: {:?}",
- local,
- // Locals always have a `alloc_id` (they are never the result of a int2ptr).
- self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
- );
- self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
- };
- Ok(())
- }
-
 /// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.
 /// (`mir::Const`/`ty::Const` have `eval` methods that can be used directly instead.)
 pub fn ctfe_query(
- }
- LocalValue::Live(Operand::Indirect(mplace)) => {
- write!(
- fmt,
- " by {} ref {:?}:",
- match mplace.meta {
- MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
- MemPlaceMeta::None => String::new(),
- },
- mplace.ptr,
- )?;
- allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
- }
- LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
- write!(fmt, " {val:?}")?;
- if let Scalar::Ptr(ptr, _size) = val {
- allocs.push(ptr.provenance.get_alloc_id());
- }
- }
- LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
- write!(fmt, " ({val1:?}, {val2:?})")?;
- if let Scalar::Ptr(ptr, _size) = val1 {
- allocs.push(ptr.provenance.get_alloc_id());
- }
- if let Scalar::Ptr(ptr, _size) = val2 {
- allocs.push(ptr.provenance.get_alloc_id());
- }
- }
- }
+ self.ecx.frame().locals[local].print(&mut allocs, fmt)?;
 
 write!(fmt, ": {:?}", self.ecx.dump_allocs(allocs.into_iter().flatten().collect()))
 }
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index bdce8253b2e..7af4e0c285b 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -37,7 +37,7 @@ pub enum ReturnAction {
 /// took care of everything.
 NoJump,
 
- /// Returned by [`InterpCx::pop_stack_frame`] when no cleanup should be done.
+ /// Returned by [`InterpCx::pop_stack_frame_raw`] when no cleanup should be done.
 NoCleanup,
 }
 
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index afa2303e387..511756e3f86 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -1,5 +1,6 @@
 //! An interpreter for MIR used in CTFE and by miri
 
+mod call;
 mod cast;
 mod discriminant;
 mod eval_context;
@@ -11,8 +12,8 @@ mod operand;
 mod operator;
 mod place;
 mod projection;
+mod stack;
 mod step;
-mod terminator;
 mod traits;
 mod util;
 mod validity;
@@ -22,7 +23,8 @@ use eval_context::{from_known_layout, mir_assign_valid_types};
 #[doc(no_inline)]
 pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
 
-pub use self::eval_context::{format_interp_error, Frame, FrameInfo, InterpCx, StackPopCleanup};
+pub use self::call::FnArg;
+pub use self::eval_context::{format_interp_error, InterpCx};
 pub use self::intern::{
 intern_const_alloc_for_constprop, intern_const_alloc_recursive, HasStaticRootDefId, InternKind,
 InternResult,
@@ -35,7 +37,7 @@ pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
 pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 use self::place::{MemPlace, Place};
 pub use self::projection::{OffsetMode, Projectable};
-pub use self::terminator::FnArg;
+pub use self::stack::{Frame, FrameInfo, LocalState, StackPopCleanup, StackPopInfo};
 pub(crate) use self::util::create_static_alloc;
 pub use self::validity::{CtfeValidationMode, RefTracking};
 pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index d4559e1e8c6..4ced009ab39 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -184,6 +184,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
 #[inline]
 pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
 debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
+ debug_assert_eq!(val.size(), layout.size);
 ImmTy { imm: val.into(), layout }
 }
 
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
new file mode 100644
index 00000000000..b5f55434d5a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -0,0 +1,638 @@
+//! Manages the low-level pushing and popping of stack frames and the (de)allocation of local variables.
+//! For handling of argument passing and return values, see the `call` module.
+use std::cell::Cell;
+use std::{fmt, mem};
+
+use either::{Either, Left, Right};
+use rustc_hir as hir;
+use rustc_hir::definitions::DefPathData;
+use rustc_index::IndexVec;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::{bug, mir};
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_span::Span;
+use tracing::{info_span, instrument, trace};
+
+use super::{
+ from_known_layout, throw_ub, throw_unsup, AllocId, CtfeProvenance, Immediate, InterpCx,
+ InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, MemoryKind, Operand, Pointer,
+ Provenance, ReturnAction, Scalar,
+};
+use crate::errors;
+
+// The PhantomData exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+ /// By default a `SpanGuard` does nothing.
+ fn new() -> Self {
+ Self(tracing::Span::none(), std::marker::PhantomData)
+ }
+
+ /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+ /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+ /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+ /// been pushed.
+ fn enter(&mut self, span: tracing::Span) {
+ // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+ // we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
+ // can't protect the tracing stack, but that'll just lead to weird logging, no actual
+ // problems.
+ *self = Self(span, std::marker::PhantomData);
+ self.0.with_subscriber(|(id, dispatch)| {
+ dispatch.enter(id);
+ });
+ }
+}
+
+impl Drop for SpanGuard {
+ fn drop(&mut self) {
+ self.0.with_subscriber(|(id, dispatch)| {
+ dispatch.exit(id);
+ });
+ }
+}
+
+/// A stack frame.
+pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
+ ////////////////////////////////////////////////////////////////////////////////
+ // Function and callsite information
+ ////////////////////////////////////////////////////////////////////////////////
+ /// The MIR for the function called on this frame.
+ pub body: &'tcx mir::Body<'tcx>,
+
+ /// The def_id and args of the current function.
+ pub instance: ty::Instance<'tcx>,
+
+ /// Extra data for the machine.
+ pub extra: Extra,
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Return place and locals
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Work to perform when returning from this function.
+ pub return_to_block: StackPopCleanup,
+
+ /// The location where the result of the current stack frame should be written to,
+ /// and its layout in the caller.
+ pub return_place: MPlaceTy<'tcx, Prov>,
+
+ /// The list of locals for this stack frame, stored in order as
+ /// `[return_ptr, arguments..., variables..., temporaries...]`.
+ /// The locals are stored as `Option<Value>`s.
+ /// `None` represents a local that is currently dead, while a live local
+ /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+ ///
+ /// Do *not* access this directly; always go through the machine hook!
+ pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
+
+ /// The span of the `tracing` crate is stored here.
+ /// When the guard is dropped, the span is exited. This gives us
+ /// a full stack trace on all tracing statements.
+ tracing_span: SpanGuard,
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Current position within the function
+ ////////////////////////////////////////////////////////////////////////////////
+ /// If this is `Right`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ ///
+ /// Needs to be public because ConstProp does unspeakable things to it.
+ pub loc: Either<mir::Location, Span>,
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
+pub enum StackPopCleanup {
+ /// Jump to the next block in the caller, or cause UB if None (that's a function
+ /// that may never return). Also store layout of return place so
+ /// we can validate it at that layout.
+ /// `ret` stores the block we jump to on a normal return, while `unwind`
+ /// stores the block used for cleanup during unwinding.
+ Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
+ /// The root frame of the stack: nowhere else to jump to.
+ /// `cleanup` says whether locals are deallocated. Static computation
+ /// wants them leaked to intern what they need (and just throw away
+ /// the entire `ecx` when it is done).
+ Root { cleanup: bool },
+}
+
+/// Return type of [`InterpCx::pop_stack_frame_raw`].
+pub struct StackPopInfo<'tcx, Prov: Provenance> {
+ /// Additional information about the action to be performed when returning from the popped
+ /// stack frame.
+ pub return_action: ReturnAction,
+
+ /// [`return_to_block`](Frame::return_to_block) of the popped stack frame.
+ pub return_to_block: StackPopCleanup,
+
+ /// [`return_place`](Frame::return_place) of the popped stack frame.
+ pub return_place: MPlaceTy<'tcx, Prov>,
+}
+
+/// State of a local variable including a memoized layout
+#[derive(Clone)]
+pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
+ value: LocalValue<Prov>,
+ /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
+ /// Avoids computing the layout of locals that are never actually initialized.
+ layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("LocalState")
+ .field("value", &self.value)
+ .field("ty", &self.layout.get().map(|l| l.ty))
+ .finish()
+ }
+}
+
+/// Current value of a local variable
+///
+/// This does not store the type of the local; the type is given by `body.local_decls` and can never
+/// change, so by not storing here we avoid having to maintain that as an invariant.
+#[derive(Copy, Clone, Debug)] // Miri debug-prints these
+pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
+ /// This local is not currently alive, and cannot be used at all.
+ Dead,
+ /// A normal, live local.
+ /// Mostly for convenience, we re-use the `Operand` type here.
+ /// This is an optimization over just always having a pointer here;
+ /// we can thus avoid doing an allocation when the local just stores
+ /// immediate values *and* never has its address taken.
+ Live(Operand<Prov>),
+}
+
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+ pub fn make_live_uninit(&mut self) {
+ self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ }
+
+ /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
+ /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
+ /// private.
+ pub fn as_mplace_or_imm(
+ &self,
+ ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
+ match self.value {
+ LocalValue::Dead => None,
+ LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
+ LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
+ }
+ }
+
+ /// Read the local's value or error if the local is not yet live or not live anymore.
+ #[inline(always)]
+ pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+ match &self.value {
+ LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+ LocalValue::Live(val) => Ok(val),
+ }
+ }
+
+ /// Overwrite the local. If the local can be overwritten in place, return a reference
+ /// to do so; otherwise return the `MemPlace` to consult instead.
+ #[inline(always)]
+ pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+ match &mut self.value {
+ LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+ LocalValue::Live(val) => Ok(val),
+ }
+ }
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Clone, Debug)]
+pub struct FrameInfo<'tcx> {
+ pub instance: ty::Instance<'tcx>,
+ pub span: Span,
+}
+
+// FIXME: only used by miri, should be removed once translatable.
+impl<'tcx> fmt::Display for FrameInfo<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
+ write!(f, "inside closure")
+ } else {
+ // Note: this triggers a `must_produce_diag` state, which means that if we ever
+ // get here we must emit a diagnostic. We should never display a `FrameInfo` unless
+ // we actually want to emit a warning or error to the user.
+ write!(f, "inside `{}`", self.instance)
+ }
+ })
+ }
+}
+
+impl<'tcx> FrameInfo<'tcx> {
+ pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
+ let span = self.span;
+ if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
+ errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
+ } else {
+ let instance = format!("{}", self.instance);
+ // Note: this triggers a `must_produce_diag` state, which means that if we ever get
+ // here we must emit a diagnostic. We should never display a `FrameInfo` unless we
+ // actually want to emit a warning or error to the user.
+ errors::FrameNote { where_: "instance", span, instance, times: 0 }
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
+ pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> {
+ Frame {
+ body: self.body,
+ instance: self.instance,
+ return_to_block: self.return_to_block,
+ return_place: self.return_place,
+ locals: self.locals,
+ loc: self.loc,
+ extra,
+ tracing_span: self.tracing_span,
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
+ /// Get the current location within the Frame.
+ ///
+ /// If this is `Right`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ ///
+ /// Used by priroda.
+ pub fn current_loc(&self) -> Either<mir::Location, Span> {
+ self.loc
+ }
+
+ /// Return the `SourceInfo` of the current instruction.
+ pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
+ self.loc.left().map(|loc| self.body.source_info(loc))
+ }
+
+ pub fn current_span(&self) -> Span {
+ match self.loc {
+ Left(loc) => self.body.source_info(loc).span,
+ Right(span) => span,
+ }
+ }
+
+ pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option<hir::HirId> {
+ // We first try to get a HirId via the current source scope,
+ // and fall back to `body.source`.
+ self.current_source_info()
+ .and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data {
+ mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+ mir::ClearCrossCrate::Clear => None,
+ })
+ .or_else(|| {
+ let def_id = self.body.source.def_id().as_local();
+ def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id))
+ })
+ }
+
+ /// Returns the address of the buffer where the locals are stored. This is used by `Place` as a
+ /// sanity check to detect bugs where we mix up which stack frame a place refers to.
+ #[inline(always)]
+ pub(super) fn locals_addr(&self) -> usize {
+ self.locals.raw.as_ptr().addr()
+ }
+
+ #[must_use]
+ pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> {
+ let mut frames = Vec::new();
+ // This deliberately does *not* honor `requires_caller_location` since it is used for much
+ // more than just panics.
+ for frame in stack.iter().rev() {
+ let span = match frame.loc {
+ Left(loc) => {
+ // If the stacktrace passes through MIR-inlined source scopes, add them.
+ let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc);
+ let mut scope_data = &frame.body.source_scopes[scope];
+ while let Some((instance, call_span)) = scope_data.inlined {
+ frames.push(FrameInfo { span, instance });
+ span = call_span;
+ scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()];
+ }
+ span
+ }
+ Right(span) => span,
+ };
+ frames.push(FrameInfo { span, instance: frame.instance });
+ }
+ trace!("generate stacktrace: {:#?}", frames);
+ frames
+ }
+}
+
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+ /// Very low-level helper that pushes a stack frame without initializing
+ /// the arguments or local variables.
+ #[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
+ pub(crate) fn push_stack_frame_raw(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ body: &'tcx mir::Body<'tcx>,
+ return_place: &MPlaceTy<'tcx, M::Provenance>,
+ return_to_block: StackPopCleanup,
+ ) -> InterpResult<'tcx> {
+ trace!("body: {:#?}", body);
+
+ // We can push a `Root` frame if and only if the stack is empty.
+ debug_assert_eq!(
+ self.stack().is_empty(),
+ matches!(return_to_block, StackPopCleanup::Root { .. })
+ );
+
+ // First push a stack frame so we have access to `instantiate_from_current_frame` and other
+ // `self.frame()`-based functions.
+ let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+ let locals = IndexVec::from_elem(dead_local, &body.local_decls);
+ let pre_frame = Frame {
+ body,
+ loc: Right(body.span), // Span used for errors caused during preamble.
+ return_to_block,
+ return_place: return_place.clone(),
+ locals,
+ instance,
+ tracing_span: SpanGuard::new(),
+ extra: (),
+ };
+ let frame = M::init_frame(self, pre_frame)?;
+ self.stack_mut().push(frame);
+
+ // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
+ for &const_ in body.required_consts() {
+ let c =
+ self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
+ c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| {
+ err.emit_note(*self.tcx);
+ err
+ })?;
+ }
+
+ // Finish things up.
+ M::after_stack_push(self)?;
+ self.frame_mut().loc = Left(mir::Location::START);
+ let span = info_span!("frame", "{}", instance);
+ self.frame_mut().tracing_span.enter(span);
+
+ Ok(())
+ }
+
+ /// Pops a stack frame from the stack and returns some information about it.
+ ///
+ /// This also deallocates locals, if necessary.
+ ///
+ /// [`M::before_stack_pop`] should be called before calling this function.
+ /// [`M::after_stack_pop`] is called by this function automatically.
+ ///
+ /// [`M::before_stack_pop`]: Machine::before_stack_pop
+ /// [`M::after_stack_pop`]: Machine::after_stack_pop
+ pub(super) fn pop_stack_frame_raw(
+ &mut self,
+ unwinding: bool,
+ ) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> {
+ let cleanup = self.cleanup_current_frame_locals()?;
+
+ let frame =
+ self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
+
+ let return_to_block = frame.return_to_block;
+ let return_place = frame.return_place.clone();
+
+ let return_action;
+ if cleanup {
+ return_action = M::after_stack_pop(self, frame, unwinding)?;
+ assert_ne!(return_action, ReturnAction::NoCleanup);
+ } else {
+ return_action = ReturnAction::NoCleanup;
+ };
+
+ Ok(StackPopInfo { return_action, return_to_block, return_place })
+ }
+
+ /// A private helper for [`pop_stack_frame_raw`](InterpCx::pop_stack_frame_raw).
+ /// Returns `true` if cleanup has been done, `false` otherwise.
+ fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> {
+ // Cleanup: deallocate locals.
+ // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
+ // We do this while the frame is still on the stack, so errors point to the callee.
+ let return_to_block = self.frame().return_to_block;
+ let cleanup = match return_to_block {
+ StackPopCleanup::Goto { .. } => true,
+ StackPopCleanup::Root { cleanup, .. } => cleanup,
+ };
+
+ if cleanup {
+ // We need to take the locals out, since we need to mutate while iterating.
+ let locals = mem::take(&mut self.frame_mut().locals);
+ for local in &locals {
+ self.deallocate_local(local.value)?;
+ }
+ }
+
+ Ok(cleanup)
+ }
+
+ /// In the current stack frame, mark all locals as live that are not arguments and don't have
+ /// `Storage*` annotations (this includes the return place).
+ pub(crate) fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
+ self.storage_live(mir::RETURN_PLACE)?;
+
+ let body = self.body();
+ let always_live = always_storage_live_locals(body);
+ for local in body.vars_and_temps_iter() {
+ if always_live.contains(local) {
+ self.storage_live(local)?;
+ }
+ }
+ Ok(())
+ }
+
+ pub fn storage_live_dyn(
+ &mut self,
+ local: mir::Local,
+ meta: MemPlaceMeta<M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ trace!("{:?} is now live", local);
+
+ // We avoid `ty.is_trivially_sized` since that does something expensive for ADTs.
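    // Illustration (a sketch, not part of the function): what the syntactic
    // check below concludes for a few types, where `false` only means
    // "fall back to computing the layout", not "unsized":
    //
    //     is_very_trivially_sized(u32)     -> true   (scalar)
    //     is_very_trivially_sized(&[u8])   -> true   (reference, even to an unsized pointee)
    //     is_very_trivially_sized([u8])    -> false  (genuinely unsized)
    //     is_very_trivially_sized(Vec<u8>) -> false  (an ADT: sized, but proving that needs a query)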
+ fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Coroutine(..)
+ | ty::CoroutineWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::CoroutineClosure(..)
+ | ty::Never
+ | ty::Error(_)
+ | ty::Dynamic(_, _, ty::DynStar) => true,
+
+ ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false,
+
+ ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)),
+
+ ty::Pat(ty, ..) => is_very_trivially_sized(*ty),
+
+ // We don't want to do any queries, so there is not much we can do with ADTs.
+ ty::Adt(..) => false,
+
+ ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+
+ ty::Infer(ty::TyVar(_)) => false,
+
+ ty::Bound(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
+ }
+ }
+ }
+
+ // This is a hot function, we avoid computing the layout when possible.
+ // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+ let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+ None
+ } else {
+ // We need the layout.
+ let layout = self.layout_of_local(self.frame(), local, None)?;
+ if layout.is_sized() { None } else { Some(layout) }
+ };
+
+ let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+ if !meta.has_meta() {
+ throw_unsup!(UnsizedLocal);
+ }
+ // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+ let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+ Operand::Indirect(*dest_place.mplace())
+ } else {
+ assert!(!meta.has_meta()); // we're dropping the metadata
+ // Just make this an efficient immediate.
+ // Note that not calling `layout_of` here does have one real consequence:
+ // if the type is too big, we'll only notice this when the local is actually initialized,
+ // which is a bit too late -- we should ideally notice this already here, when the memory
+ // is conceptually allocated. But given how rare that error is and that this is a hot function,
+ // we accept this downside for now.
+ Operand::Immediate(Immediate::Uninit)
+ });
+
+ // If the local is already live, deallocate its old memory.
+ let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+ self.deallocate_local(old)?;
+ Ok(())
+ }
+
+ /// Mark a storage as live, killing the previous content.
+ #[inline(always)]
+ pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+ self.storage_live_dyn(local, MemPlaceMeta::None)
+ }
+
+ pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+ assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+ trace!("{:?} is now dead", local);
+
+ // If the local is already dead, this is a NOP.
+ let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+ self.deallocate_local(old)?;
+ Ok(())
+ }
+
+ fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
+ if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+ // All locals have a backing allocation, even if the allocation is empty
+ // due to the local having ZST type. Hence we can `unwrap`.
+ trace!(
+ "deallocating local {:?}: {:?}",
+ local,
+ // Locals always have an `alloc_id` (they are never the result of an int2ptr).
+ self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
+ );
+ self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
+ };
+ Ok(())
+ }
+
+ #[inline(always)]
+ pub(super) fn layout_of_local(
+ &self,
+ frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
+ local: mir::Local,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+ let state = &frame.locals[local];
+ if let Some(layout) = state.layout.get() {
+ return Ok(layout);
+ }
+
+ let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+ let local_ty = frame.body.local_decls[local].ty;
+ let local_ty =
+ self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+ self.layout_of(local_ty)
+ })?;
+
+ // Layouts of locals are requested a lot, so we cache them.
+ state.layout.set(Some(layout));
+ Ok(layout)
+ }
+}
+
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+ pub(super) fn print(
+ &self,
+ allocs: &mut Vec<Option<AllocId>>,
+ fmt: &mut std::fmt::Formatter<'_>,
+ ) -> std::fmt::Result {
+ match self.value {
+ LocalValue::Dead => write!(fmt, " is dead")?,
+ LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
+ write!(fmt, " is uninitialized")?
+ }
+ LocalValue::Live(Operand::Indirect(mplace)) => {
+ write!(
+ fmt,
+ " by {} ref {:?}:",
+ match mplace.meta {
+ MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
+ MemPlaceMeta::None => String::new(),
+ },
+ mplace.ptr,
+ )?;
+ allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
+ }
+ LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
+ write!(fmt, " {val:?}")?;
+ if let Scalar::Ptr(ptr, _size) = val {
+ allocs.push(ptr.provenance.get_alloc_id());
+ }
+ }
+ LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+ write!(fmt, " ({val1:?}, {val2:?})")?;
+ if let Scalar::Ptr(ptr, _size) = val1 {
+ allocs.push(ptr.provenance.get_alloc_id());
+ }
+ if let Scalar::Ptr(ptr, _size) = val2 {
+ allocs.push(ptr.provenance.get_alloc_id());
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 28cf1068f40..654c1076545 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -4,15 +4,29 @@
 
 use either::Either;
 use rustc_index::IndexSlice;
-use rustc_middle::{bug, mir};
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::{bug, mir, span_bug};
+use rustc_span::source_map::Spanned;
+use rustc_target::abi::call::FnAbi;
 use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
 use tracing::{info, instrument, trace};
 
 use super::{
- ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy, Projectable, Scalar,
+ throw_ub, FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta,
+ PlaceTy, Projectable, Scalar,
 };
 use crate::util;
 
+struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
+ callee: FnVal<'tcx, M::ExtraFnVal>,
+ args: Vec<FnArg<'tcx, M::Provenance>>,
+ fn_sig: ty::FnSig<'tcx>,
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+ /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
+ with_caller_location: bool,
+}
+
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 /// Returns `true` as long as there are more things to do.
 ///
@@ -36,7 +50,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
 if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
 let old_frames = self.frame_idx();
- self.statement(stmt)?;
+ self.eval_statement(stmt)?;
 // Make sure we are not updating `statement_index` of the wrong frame.
 assert_eq!(old_frames, self.frame_idx());
 // Advance the program counter.
@@ -47,7 +61,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 M::before_terminator(self)?;
 
 let terminator = basic_block.terminator();
- self.terminator(terminator)?;
+ self.eval_terminator(terminator)?;
+ if !self.stack().is_empty() {
+ if let Either::Left(loc) = self.frame().loc {
+ info!("// executing {:?}", loc.block);
+ }
+ }
 Ok(true)
 }
 
@@ -55,7 +74,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 /// statement counter.
 ///
 /// This does NOT move the statement counter forward, the caller has to do that!
- pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+ pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
 info!("{:?}", stmt);
 
 use rustc_middle::mir::StatementKind::*;
@@ -349,16 +368,225 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 Ok(())
 }
 
- /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
- fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+ /// Evaluate the arguments of a function call
+ fn eval_fn_call_arguments(
+ &self,
+ ops: &[Spanned<mir::Operand<'tcx>>],
+ ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
+ ops.iter()
+ .map(|op| {
+ let arg = match &op.node {
+ mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
+ // Make a regular copy.
+ let op = self.eval_operand(&op.node, None)?;
+ FnArg::Copy(op)
+ }
+ mir::Operand::Move(place) => {
+ // If this place lives in memory, preserve its location.
+ // We call `place_to_op` which will be an `MPlaceTy` whenever there exists
+ // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
+ // which can return a local even if that has an mplace.)
+ let place = self.eval_place(*place)?;
+ let op = self.place_to_op(&place)?;
+
+ match op.as_mplace_or_imm() {
+ Either::Left(mplace) => FnArg::InPlace(mplace),
+ Either::Right(_imm) => {
+ // This argument doesn't live in memory, so there's no place
+ // to make inaccessible during the call.
+ // We rely on there not being any stray `PlaceTy` that would let the
+ // caller directly access this local!
+ // This is also crucial for tail calls, where we want the `FnArg` to
+ // stay valid when the old stack frame gets popped.
+ FnArg::Copy(op)
+ }
+ }
+ }
+ };
+
+ Ok(arg)
+ })
+ .collect()
+ }
+
+ /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the
+ /// necessary information about callee and arguments to make a call.
+ fn eval_callee_and_args(
+ &self,
+ terminator: &mir::Terminator<'tcx>,
+ func: &mir::Operand<'tcx>,
+ args: &[Spanned<mir::Operand<'tcx>>],
+ ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
+ let func = self.eval_operand(func, None)?;
+ let args = self.eval_fn_call_arguments(args)?;
+
+ let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
+ let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args =
+ self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
+
+ let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
+ ty::FnPtr(_sig) => {
+ let fn_ptr = self.read_pointer(&func)?;
+ let fn_val = self.get_ptr_fn(fn_ptr)?;
+ (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
+ }
+ ty::FnDef(def_id, args) => {
+ let instance = self.resolve(def_id, args)?;
+ (
+ FnVal::Instance(instance),
+ self.fn_abi_of_instance(instance, extra_args)?,
+ instance.def.requires_caller_location(*self.tcx),
+ )
+ }
+ _ => {
+ span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
+ }
+ };
+
+ Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
+ }
+
+ fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
 info!("{:?}", terminator.kind);
 
- self.eval_terminator(terminator)?;
- if !self.stack().is_empty() {
- if let Either::Left(loc) = self.frame().loc {
- info!("// executing {:?}", loc.block);
+ use rustc_middle::mir::TerminatorKind::*;
+ match terminator.kind {
+ Return => {
+ self.return_from_current_stack_frame(/* unwinding */ false)?
+ }
+
+ Goto { target } => self.go_to_block(target),
+
+ SwitchInt { ref discr, ref targets } => {
+ let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
+ trace!("SwitchInt({:?})", *discr);
+
+ // Branch to the `otherwise` case by default, if no match is found.
+ let mut target_block = targets.otherwise();
+
+ for (const_int, target) in targets.iter() {
+ // Compare using MIR BinOp::Eq, to also support pointer values.
+ // (Avoiding `self.binary_op` as that does some redundant layout computation.)
+ let res = self.binary_op(
+ mir::BinOp::Eq,
+ &discr,
+ &ImmTy::from_uint(const_int, discr.layout),
+ )?;
+ if res.to_scalar().to_bool()? {
+ target_block = target;
+ break;
+ }
+ }
+
+ self.go_to_block(target_block);
+ }
+
+ Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ unwind,
+ call_source: _,
+ fn_span: _,
+ } => {
+ let old_stack = self.frame_idx();
+ let old_loc = self.frame().loc;
+
+ let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
+ self.eval_callee_and_args(terminator, func, args)?;
+
+ let destination = self.force_allocation(&self.eval_place(destination)?)?;
+ self.init_fn_call(
+ callee,
+ (fn_sig.abi, fn_abi),
+ &args,
+ with_caller_location,
+ &destination,
+ target,
+ if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
+ )?;
+ // Sanity-check that `init_fn_call` either pushed a new frame or
+ // did a jump to another block.
+ if self.frame_idx() == old_stack && self.frame().loc == old_loc { + span_bug!(terminator.source_info.span, "evaluating this call made no progress"); + } + } + + TailCall { ref func, ref args, fn_span: _ } => { + let old_frame_idx = self.frame_idx(); + + let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } = + self.eval_callee_and_args(terminator, func, args)?; + + self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?; + + if self.frame_idx() != old_frame_idx { + span_bug!( + terminator.source_info.span, + "evaluating this tail call pushed a new stack frame" + ); + } + } + + Drop { place, target, unwind, replace: _ } => { + let place = self.eval_place(place)?; + let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); + if let ty::InstanceKind::DropGlue(_, None) = instance.def { + // This is the branch we enter if and only if the dropped type has no drop glue + // whatsoever. This can happen as a result of monomorphizing a drop of a + // generic. In order to make sure that generic and non-generic code behaves + // roughly the same (and in keeping with Mir semantics) we do nothing here. + self.go_to_block(target); + return Ok(()); + } + trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty); + self.init_drop_in_place_call(&place, instance, target, unwind)?; + } + + Assert { ref cond, expected, ref msg, target, unwind } => { + let ignored = + M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check(); + let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?; + if ignored || expected == cond_val { + self.go_to_block(target); + } else { + M::assert_panic(self, msg, unwind)?; + } + } + + UnwindTerminate(reason) => { + M::unwind_terminate(self, reason)?; + } + + // When we encounter Resume, we've finished unwinding + // cleanup for the current stack frame. We pop it in order + // to continue unwinding the next frame + UnwindResume => { + trace!("unwinding: resuming from cleanup"); + // By definition, a Resume terminator means + // that we're unwinding + self.return_from_current_stack_frame(/* unwinding */ true)?; + return Ok(()); + } + + // It is UB to ever encounter this. + Unreachable => throw_ub!(Unreachable), + + // These should never occur for MIR we actually run. + FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!( + terminator.source_info.span, + "{:#?} should have been eliminated by MIR pass", + terminator.kind + ), + + InlineAsm { template, ref operands, options, ref targets, .. 
} => {
+ M::eval_inline_asm(self, template, operands, options, targets)?;
 }
 }
+
 Ok(())
 }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
deleted file mode 100644
index 47d0e22b527..00000000000
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ /dev/null
@@ -1,1073 +0,0 @@
-use std::borrow::Cow;
-
-use either::Either;
-use rustc_middle::ty::layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, AdtDef, Instance, Ty};
-use rustc_middle::{bug, mir, span_bug};
-use rustc_span::source_map::Spanned;
-use rustc_span::sym;
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
-use rustc_target::abi::{self, FieldIdx, Integer};
-use rustc_target::spec::abi::Abi;
-use tracing::trace;
-
-use super::{
- throw_ub, throw_ub_custom, throw_unsup_format, CtfeProvenance, FnVal, ImmTy, InterpCx,
- InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, Scalar,
- StackPopCleanup,
-};
-use crate::fluent_generated as fluent;
-use crate::interpret::eval_context::StackPopInfo;
-use crate::interpret::ReturnAction;
-
-/// An argment passed to a function.
-#[derive(Clone, Debug)]
-pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
- /// Pass a copy of the given operand.
- Copy(OpTy<'tcx, Prov>),
- /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
- /// make the place inaccessible for the duration of the function call.
- InPlace(MPlaceTy<'tcx, Prov>),
-}
-
-impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
- pub fn layout(&self) -> &TyAndLayout<'tcx> {
- match self {
- FnArg::Copy(op) => &op.layout,
- FnArg::InPlace(mplace) => &mplace.layout,
- }
- }
-}
-
-struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
- callee: FnVal<'tcx, M::ExtraFnVal>,
- args: Vec<FnArg<'tcx, M::Provenance>>,
- fn_sig: ty::FnSig<'tcx>,
- fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
- /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
- with_caller_location: bool,
-}
-
-impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
- /// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the
- /// original memory occurs.
- pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
- match arg {
- FnArg::Copy(op) => op.clone(),
- FnArg::InPlace(mplace) => mplace.clone().into(),
- }
- }
-
- /// Make a copy of the given fn_args. Any `InPlace` are degenerated to copies, no protection of the
- /// original memory occurs.
- pub fn copy_fn_args(
- &self,
- args: &[FnArg<'tcx, M::Provenance>],
- ) -> Vec<OpTy<'tcx, M::Provenance>> {
- args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
- }
-
- pub fn fn_arg_field(
- &self,
- arg: &FnArg<'tcx, M::Provenance>,
- field: usize,
- ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
- Ok(match arg {
- FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
- FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
- })
- }
-
- pub(super) fn eval_terminator(
- &mut self,
- terminator: &mir::Terminator<'tcx>,
- ) -> InterpResult<'tcx> {
- use rustc_middle::mir::TerminatorKind::*;
- match terminator.kind {
- Return => {
- self.return_from_current_stack_frame(/* unwinding */ false)?
- } - - Goto { target } => self.go_to_block(target), - - SwitchInt { ref discr, ref targets } => { - let discr = self.read_immediate(&self.eval_operand(discr, None)?)?; - trace!("SwitchInt({:?})", *discr); - - // Branch to the `otherwise` case by default, if no match is found. - let mut target_block = targets.otherwise(); - - for (const_int, target) in targets.iter() { - // Compare using MIR BinOp::Eq, to also support pointer values. - // (Avoiding `self.binary_op` as that does some redundant layout computation.) - let res = self.binary_op( - mir::BinOp::Eq, - &discr, - &ImmTy::from_uint(const_int, discr.layout), - )?; - if res.to_scalar().to_bool()? { - target_block = target; - break; - } - } - - self.go_to_block(target_block); - } - - Call { - ref func, - ref args, - destination, - target, - unwind, - call_source: _, - fn_span: _, - } => { - let old_stack = self.frame_idx(); - let old_loc = self.frame().loc; - - let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } = - self.eval_callee_and_args(terminator, func, args)?; - - let destination = self.force_allocation(&self.eval_place(destination)?)?; - self.eval_fn_call( - callee, - (fn_sig.abi, fn_abi), - &args, - with_caller_location, - &destination, - target, - if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable }, - )?; - // Sanity-check that `eval_fn_call` either pushed a new frame or - // did a jump to another block. - if self.frame_idx() == old_stack && self.frame().loc == old_loc { - span_bug!(terminator.source_info.span, "evaluating this call made no progress"); - } - } - - TailCall { ref func, ref args, fn_span: _ } => { - let old_frame_idx = self.frame_idx(); - - let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } = - self.eval_callee_and_args(terminator, func, args)?; - - self.eval_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?; - - if self.frame_idx() != old_frame_idx { - span_bug!( - terminator.source_info.span, - "evaluating this tail call pushed a new stack frame" - ); - } - } - - Drop { place, target, unwind, replace: _ } => { - let place = self.eval_place(place)?; - let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); - if let ty::InstanceKind::DropGlue(_, None) = instance.def { - // This is the branch we enter if and only if the dropped type has no drop glue - // whatsoever. This can happen as a result of monomorphizing a drop of a - // generic. In order to make sure that generic and non-generic code behaves - // roughly the same (and in keeping with Mir semantics) we do nothing here. - self.go_to_block(target); - return Ok(()); - } - trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty); - self.drop_in_place(&place, instance, target, unwind)?; - } - - Assert { ref cond, expected, ref msg, target, unwind } => { - let ignored = - M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check(); - let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?; - if ignored || expected == cond_val { - self.go_to_block(target); - } else { - M::assert_panic(self, msg, unwind)?; - } - } - - UnwindTerminate(reason) => { - M::unwind_terminate(self, reason)?; - } - - // When we encounter Resume, we've finished unwinding - // cleanup for the current stack frame. 
We pop it in order
- // to continue unwinding the next frame
- UnwindResume => {
- trace!("unwinding: resuming from cleanup");
- // By definition, a Resume terminator means
- // that we're unwinding
- self.return_from_current_stack_frame(/* unwinding */ true)?;
- return Ok(());
- }
-
- // It is UB to ever encounter this.
- Unreachable => throw_ub!(Unreachable),
-
- // These should never occur for MIR we actually run.
- FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
- terminator.source_info.span,
- "{:#?} should have been eliminated by MIR pass",
- terminator.kind
- ),
-
- InlineAsm { template, ref operands, options, ref targets, .. } => {
- M::eval_inline_asm(self, template, operands, options, targets)?;
- }
- }
-
- Ok(())
- }
-
- /// Evaluate the arguments of a function call
- pub(super) fn eval_fn_call_arguments(
- &self,
- ops: &[Spanned<mir::Operand<'tcx>>],
- ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
- ops.iter()
- .map(|op| {
- let arg = match &op.node {
- mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
- // Make a regular copy.
- let op = self.eval_operand(&op.node, None)?;
- FnArg::Copy(op)
- }
- mir::Operand::Move(place) => {
- // If this place lives in memory, preserve its location.
- // We call `place_to_op` which will be an `MPlaceTy` whenever there exists
- // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
- // which can return a local even if that has an mplace.)
- let place = self.eval_place(*place)?;
- let op = self.place_to_op(&place)?;
-
- match op.as_mplace_or_imm() {
- Either::Left(mplace) => FnArg::InPlace(mplace),
- Either::Right(_imm) => {
- // This argument doesn't live in memory, so there's no place
- // to make inaccessible during the call.
- // We rely on there not being any stray `PlaceTy` that would let the
- // caller directly access this local!
- // This is also crucial for tail calls, where we want the `FnArg` to
- // stay valid when the old stack frame gets popped.
- FnArg::Copy(op)
- }
- }
- }
- };
-
- Ok(arg)
- })
- .collect()
- }
-
- /// Find the wrapped inner type of a transparent wrapper.
- /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
- ///
- /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
- fn unfold_transparent(
- &self,
- layout: TyAndLayout<'tcx>,
- may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
- ) -> TyAndLayout<'tcx> {
- match layout.ty.kind() {
- ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
- assert!(!adt_def.is_enum());
- // Find the non-1-ZST field, and recurse.
- let (_, field) = layout.non_1zst_field(self).unwrap();
- self.unfold_transparent(field, may_unfold)
- }
- // Not a transparent type, no further unfolding.
- _ => layout,
- }
- }
-
- /// Unwrap types that are guaranteed a null-pointer-optimization
- fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
- // Check if this is `Option` wrapping some type or if this is `Result` wrapping a 1-ZST and
- // another type.
- let ty::Adt(def, args) = layout.ty.kind() else {
- // Not an ADT, so definitely no NPO.
- return Ok(layout);
- };
- let inner = if self.tcx.is_diagnostic_item(sym::Option, def.did()) {
- // The wrapped type is the only arg.
- self.layout_of(args[0].as_type().unwrap())?
- } else if self.tcx.is_diagnostic_item(sym::Result, def.did()) {
- // We want to extract which (if any) of the args is not a 1-ZST.
- let lhs = self.layout_of(args[0].as_type().unwrap())?;
- let rhs = self.layout_of(args[1].as_type().unwrap())?;
- if lhs.is_1zst() {
- rhs
- } else if rhs.is_1zst() {
- lhs
- } else {
- return Ok(layout); // no NPO
- }
- } else {
- return Ok(layout); // no NPO
- };
-
- // Check if the inner type is one of the NPO-guaranteed ones.
- // For that we first unpeel transparent *structs* (but not unions).
- let is_npo = |def: AdtDef<'tcx>| {
- self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
- };
- let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
- // Stop at NPO tpyes so that we don't miss that attribute in the check below!
- def.is_struct() && !is_npo(def)
- });
- Ok(match inner.ty.kind() {
- ty::Ref(..) | ty::FnPtr(..) => {
- // Option<&T> behaves like &T, and same for fn()
- inner
- }
- ty::Adt(def, _) if is_npo(*def) => {
- // Once we found a `nonnull_optimization_guaranteed` type, further strip off
- // newtype structs from it to find the underlying ABI type.
- self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
- }
- _ => {
- // Everything else we do not unfold.
- layout
- }
- })
- }
-
- /// Check if these two layouts look like they are fn-ABI-compatible.
- /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
- /// that only checking the `PassMode` is insufficient.)
- fn layout_compat(
- &self,
- caller: TyAndLayout<'tcx>,
- callee: TyAndLayout<'tcx>,
- ) -> InterpResult<'tcx, bool> {
- // Fast path: equal types are definitely compatible.
- if caller.ty == callee.ty {
- return Ok(true);
- }
- // 1-ZST are compatible with all 1-ZST (and with nothing else).
- if caller.is_1zst() || callee.is_1zst() {
- return Ok(caller.is_1zst() && callee.is_1zst());
- }
- // Unfold newtypes and NPO optimizations.
- let unfold = |layout: TyAndLayout<'tcx>| {
- self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
- };
- let caller = unfold(caller)?;
- let callee = unfold(callee)?;
- // Now see if these inner types are compatible.
-
- // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
- // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
- // must be compatible. So we just accept everything with Pointer ABI as compatible,
- // even if this will accept some code that is not stably guaranteed to work.
- // This also handles function pointers.
- let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
- abi::Abi::Scalar(s) => match s.primitive() {
- abi::Primitive::Pointer(addr_space) => Some(addr_space),
- _ => None,
- },
- _ => None,
- };
- if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
- return Ok(caller == callee);
- }
- // For wide pointers we have to get the pointee type.
- let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
- // We cannot use `builtin_deref` here since we need to reject `Box`.
- Ok(Some(match ty.kind() {
- ty::Ref(_, ty, _) => *ty,
- ty::RawPtr(ty, _) => *ty,
- // We only accept `Box` with the default allocator.
- _ if ty.is_box_global(*self.tcx) => ty.boxed_ty(),
- _ => return Ok(None),
- }))
- };
- if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
- // This is okay if they have the same metadata type.
- let meta_ty = |ty: Ty<'tcx>| {
- // Even if `ty` is normalized, the search for the unsized tail will project
- // to fields, which can yield non-normalized types.
So we need to provide a - // normalization function. - let normalize = |ty| self.tcx.normalize_erasing_regions(self.param_env, ty); - ty.ptr_metadata_ty(*self.tcx, normalize) - }; - return Ok(meta_ty(caller) == meta_ty(callee)); - } - - // Compatible integer types (in particular, usize vs ptr-sized-u32/u64). - // `char` counts as `u32`. - let int_ty = |ty: Ty<'tcx>| { - Some(match ty.kind() { - ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true), - ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false), - ty::Char => (Integer::I32, /* signed */ false), - _ => return None, - }) - }; - if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) { - // This is okay if they are the same integer type. - return Ok(caller == callee); - } - - // Fall back to exact equality. - // FIXME: We are missing the rules for "repr(C) wrapping compatible types". - Ok(caller == callee) - } - - fn check_argument_compat( - &self, - caller_abi: &ArgAbi<'tcx, Ty<'tcx>>, - callee_abi: &ArgAbi<'tcx, Ty<'tcx>>, - ) -> InterpResult<'tcx, bool> { - // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target, - // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility. - if self.layout_compat(caller_abi.layout, callee_abi.layout)? { - // Ensure that our checks imply actual ABI compatibility for this concrete call. - assert!(caller_abi.eq_abi(callee_abi)); - return Ok(true); - } else { - trace!( - "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}", - caller_abi, callee_abi - ); - return Ok(false); - } - } - - /// Initialize a single callee argument, checking the types for compatibility. - fn pass_argument<'x, 'y>( - &mut self, - caller_args: &mut impl Iterator< - Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>), - >, - callee_abi: &ArgAbi<'tcx, Ty<'tcx>>, - callee_arg: &mir::Place<'tcx>, - callee_ty: Ty<'tcx>, - already_live: bool, - ) -> InterpResult<'tcx> - where - 'tcx: 'x, - 'tcx: 'y, - { - assert_eq!(callee_ty, callee_abi.layout.ty); - if matches!(callee_abi.mode, PassMode::Ignore) { - // This one is skipped. Still must be made live though! - if !already_live { - self.storage_live(callee_arg.as_local().unwrap())?; - } - return Ok(()); - } - // Find next caller arg. - let Some((caller_arg, caller_abi)) = caller_args.next() else { - throw_ub_custom!(fluent::const_eval_not_enough_caller_args); - }; - assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout); - // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are - // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the - // right type to print to the user. - - // Check compatibility - if !self.check_argument_compat(caller_abi, callee_abi)? { - throw_ub!(AbiMismatchArgument { - caller_ty: caller_abi.layout.ty, - callee_ty: callee_abi.layout.ty - }); - } - // We work with a copy of the argument for now; if this is in-place argument passing, we - // will later protect the source it comes from. This means the callee cannot observe if we - // did in-place or by-copy argument passing, except for pointer equality tests.
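For the integer rule, a quick standalone illustration (the `usize`/`u64` case assumes a 64-bit target, hence the `cfg`):

    use std::mem::{align_of, size_of};

    fn main() {
        // `char` is guaranteed the size and alignment of `u32`, which is why
        // the compatibility check above simply treats it as `u32`.
        assert_eq!(size_of::<char>(), size_of::<u32>());
        assert_eq!(align_of::<char>(), align_of::<u32>());
        // The check compares (width, signedness) pairs: on a 64-bit target
        // `usize` pairs up with `u64`, while `i64` vs `u64` would fail
        // (signedness differs).
        #[cfg(target_pointer_width = "64")]
        assert_eq!(size_of::<usize>(), size_of::<u64>());
    }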
- let caller_arg_copy = self.copy_fn_arg(caller_arg); - if !already_live { - let local = callee_arg.as_local().unwrap(); - let meta = caller_arg_copy.meta(); - // `check_argument_compat` ensures that if metadata is needed, both have the same type, - // so we know they will use the metadata the same way. - assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty); - - self.storage_live_dyn(local, meta)?; - } - // Now we can finally actually evaluate the callee place. - let callee_arg = self.eval_place(*callee_arg)?; - // We allow some transmutes here. - // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This - // is true for all `copy_op`, but there are a lot of special cases for argument passing - // specifically.) - self.copy_op_allow_transmute(&caller_arg_copy, &callee_arg)?; - // If this was an in-place pass, protect the place it comes from for the duration of the call. - if let FnArg::InPlace(mplace) = caller_arg { - M::protect_in_place_function_argument(self, mplace)?; - } - Ok(()) - } - - /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the - /// necessary information about callee and arguments to make a call. - fn eval_callee_and_args( - &self, - terminator: &mir::Terminator<'tcx>, - func: &mir::Operand<'tcx>, - args: &[Spanned<mir::Operand<'tcx>>], - ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> { - let func = self.eval_operand(func, None)?; - let args = self.eval_fn_call_arguments(args)?; - - let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx); - let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder); - let extra_args = &args[fn_sig.inputs().len()..]; - let extra_args = - self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty)); - - let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() { - ty::FnPtr(_sig) => { - let fn_ptr = self.read_pointer(&func)?; - let fn_val = self.get_ptr_fn(fn_ptr)?; - (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false) - } - ty::FnDef(def_id, args) => { - let instance = self.resolve(def_id, args)?; - ( - FnVal::Instance(instance), - self.fn_abi_of_instance(instance, extra_args)?, - instance.def.requires_caller_location(*self.tcx), - ) - } - _ => { - span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty) - } - }; - - Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location }) - } - - /// Call this function -- pushing the stack frame and initializing the arguments. - /// - /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way. - /// However, we also need `caller_abi` to determine if we need to do untupling of arguments. - /// - /// `with_caller_location` indicates whether the caller passed a caller location. Miri - /// implements caller locations without argument passing, but to match `FnAbi` we need to know - /// when those arguments are present.
- pub(crate) fn eval_fn_call( - &mut self, - fn_val: FnVal<'tcx, M::ExtraFnVal>, - (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>), - args: &[FnArg<'tcx, M::Provenance>], - with_caller_location: bool, - destination: &MPlaceTy<'tcx, M::Provenance>, - target: Option<mir::BasicBlock>, - mut unwind: mir::UnwindAction, - ) -> InterpResult<'tcx> { - trace!("eval_fn_call: {:#?}", fn_val); - - let instance = match fn_val { - FnVal::Instance(instance) => instance, - FnVal::Other(extra) => { - return M::call_extra_fn( - self, - extra, - caller_abi, - args, - destination, - target, - unwind, - ); - } - }; - - match instance.def { - ty::InstanceKind::Intrinsic(def_id) => { - assert!(self.tcx.intrinsic(def_id).is_some()); - // FIXME: Should `InPlace` arguments be reset to uninit? - if let Some(fallback) = M::call_intrinsic( - self, - instance, - &self.copy_fn_args(args), - destination, - target, - unwind, - )? { - assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden); - assert!(matches!(fallback.def, ty::InstanceKind::Item(_))); - return self.eval_fn_call( - FnVal::Instance(fallback), - (caller_abi, caller_fn_abi), - args, - with_caller_location, - destination, - target, - unwind, - ); - } else { - Ok(()) - } - } - ty::InstanceKind::VTableShim(..) - | ty::InstanceKind::ReifyShim(..) - | ty::InstanceKind::ClosureOnceShim { .. } - | ty::InstanceKind::ConstructCoroutineInClosureShim { .. } - | ty::InstanceKind::CoroutineKindShim { .. } - | ty::InstanceKind::FnPtrShim(..) - | ty::InstanceKind::DropGlue(..) - | ty::InstanceKind::CloneShim(..) - | ty::InstanceKind::FnPtrAddrShim(..) - | ty::InstanceKind::ThreadLocalShim(..) - | ty::InstanceKind::AsyncDropGlueCtorShim(..) - | ty::InstanceKind::Item(_) => { - // We need MIR for this fn - let Some((body, instance)) = M::find_mir_or_eval_fn( - self, - instance, - caller_abi, - args, - destination, - target, - unwind, - )? - else { - return Ok(()); - }; - - // Compute callee information using the `instance` returned by - // `find_mir_or_eval_fn`. - // FIXME: for variadic support, do we have to somehow determine callee's extra_args? - let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; - - if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic { - throw_unsup_format!("calling a c-variadic function is not supported"); - } - - if M::enforce_abi(self) { - if caller_fn_abi.conv != callee_fn_abi.conv { - throw_ub_custom!( - fluent::const_eval_incompatible_calling_conventions, - callee_conv = format!("{:?}", callee_fn_abi.conv), - caller_conv = format!("{:?}", caller_fn_abi.conv), - ) - } - } - - // Check that all target features required by the callee (i.e., from - // the attribute `#[target_feature(enable = ...)]`) are enabled at - // compile time. - self.check_fn_target_features(instance)?; - - if !callee_fn_abi.can_unwind { - // The callee cannot unwind, so force the `Unreachable` unwind handling. - unwind = mir::UnwindAction::Unreachable; - } - - self.push_stack_frame( - instance, - body, - destination, - StackPopCleanup::Goto { ret: target, unwind }, - )?; - - // If an error is raised here, pop the frame again to get an accurate backtrace. - // To this end, we wrap it all in a `try` block.
- let res: InterpResult<'tcx> = try { - trace!( - "caller ABI: {:?}, args: {:#?}", - caller_abi, - args.iter() - .map(|arg| ( - arg.layout().ty, - match arg { - FnArg::Copy(op) => format!("copy({op:?})"), - FnArg::InPlace(mplace) => format!("in-place({mplace:?})"), - } - )) - .collect::<Vec<_>>() - ); - trace!( - "spread_arg: {:?}, locals: {:#?}", - body.spread_arg, - body.args_iter() - .map(|local| ( - local, - self.layout_of_local(self.frame(), local, None).unwrap().ty, - )) - .collect::<Vec<_>>() - ); - - // In principle, we have two iterators: where the arguments come from, and where - // they go to. - - // For where they come from: If the ABI is RustCall, we untuple the - // last incoming argument. These two iterators do not have the same type, - // so to keep the code paths uniform we accept an allocation - // (for RustCall ABI only). - let caller_args: Cow<'_, [FnArg<'tcx, M::Provenance>]> = - if caller_abi == Abi::RustCall && !args.is_empty() { - // Untuple - let (untuple_arg, args) = args.split_last().unwrap(); - trace!("eval_fn_call: Will pass last argument by untupling"); - Cow::from( - args.iter() - .map(|a| Ok(a.clone())) - .chain( - (0..untuple_arg.layout().fields.count()) - .map(|i| self.fn_arg_field(untuple_arg, i)), - ) - .collect::<InterpResult<'tcx, Vec<_>>>()?, - ) - } else { - // Plain arg passing - Cow::from(args) - }; - // If `with_caller_location` is set we pretend there is an extra argument (that - // we will not pass). - assert_eq!( - caller_args.len() + if with_caller_location { 1 } else { 0 }, - caller_fn_abi.args.len(), - "mismatch between caller ABI and caller arguments", - ); - let mut caller_args = caller_args - .iter() - .zip(caller_fn_abi.args.iter()) - .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore)); - - // Now we have to spread them out across the callee's locals, - // taking into account the `spread_arg`. If we could write - // this as a single iterator (that handles `spread_arg`), then - // `pass_argument` would be the loop body. It takes care to - // not advance `caller_args` for ignored arguments. - let mut callee_args_abis = callee_fn_abi.args.iter(); - for local in body.args_iter() { - // Construct the destination place for this argument. At this point all - // locals are still dead, so we cannot construct a `PlaceTy`. - let dest = mir::Place::from(local); - // `layout_of_local` does more than just the instantiation we need to get the - // type, but the result gets cached so this avoids calling the instantiation - // query *again* the next time this local is accessed. - let ty = self.layout_of_local(self.frame(), local, None)?.ty; - if Some(local) == body.spread_arg { - // Make the local live once, then fill in the value field by field. - self.storage_live(local)?; - // Must be a tuple - let ty::Tuple(fields) = ty.kind() else { - span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}") - }; - for (i, field_ty) in fields.iter().enumerate() { - let dest = dest.project_deeper( - &[mir::ProjectionElem::Field( - FieldIdx::from_usize(i), - field_ty, - )], - *self.tcx, - ); - let callee_abi = callee_args_abis.next().unwrap(); - self.pass_argument( - &mut caller_args, - callee_abi, - &dest, - field_ty, - /* already_live */ true, - )?; - } - } else { - // Normal argument. Cannot mark it as live yet, it might be unsized!
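The untupling step mirrors how `extern "rust-call"` functions receive their trailing arguments as a single tuple. A plain-Rust sketch of the idea, using a hand-written splat instead of the unstable `Fn::call`:

    // A closure of two arguments is ABI-wise called with one tuple argument;
    // the interpreter projects each tuple field into its own callee local,
    // which is what the hand-rolled splat below imitates.
    fn call_with_tuple<F: Fn(i32, i32) -> i32>(f: F, args: (i32, i32)) -> i32 {
        f(args.0, args.1)
    }

    fn main() {
        let add = |a: i32, b: i32| a + b;
        assert_eq!(call_with_tuple(add, (2, 3)), 5);
    }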
- let callee_abi = callee_args_abis.next().unwrap(); - self.pass_argument( - &mut caller_args, - callee_abi, - &dest, - ty, - /* already_live */ false, - )?; - } - } - // If the callee needs a caller location, pretend we consume one more argument from the ABI. - if instance.def.requires_caller_location(*self.tcx) { - callee_args_abis.next().unwrap(); - } - // Now we should have no more caller args or callee arg ABIs - assert!( - callee_args_abis.next().is_none(), - "mismatch between callee ABI and callee body arguments" - ); - if caller_args.next().is_some() { - throw_ub_custom!(fluent::const_eval_too_many_caller_args); - } - // Don't forget to check the return type! - if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? { - throw_ub!(AbiMismatchReturn { - caller_ty: caller_fn_abi.ret.layout.ty, - callee_ty: callee_fn_abi.ret.layout.ty - }); - } - - // Protect return place for in-place return value passing. - M::protect_in_place_function_argument(self, &destination)?; - - // Don't forget to mark "initially live" locals as live. - self.storage_live_for_always_live_locals()?; - }; - match res { - Err(err) => { - self.stack_mut().pop(); - Err(err) - } - Ok(()) => Ok(()), - } - } - // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be - // codegen'd / interpreted as virtual calls through the vtable. - ty::InstanceKind::Virtual(def_id, idx) => { - let mut args = args.to_vec(); - // We have to implement all "object safe receivers". So we have to go search for a - // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively - // unwrap those newtypes until we are there. - // An `InPlace` does nothing here, we keep the original receiver intact. We can't - // really pass the argument in-place anyway, and we are constructing a new - // `Immediate` receiver. - let mut receiver = self.copy_fn_arg(&args[0]); - let receiver_place = loop { - match receiver.layout.ty.kind() { - ty::Ref(..) | ty::RawPtr(..) => { - // We do *not* use `deref_pointer` here: we don't want to conceptually - // create a place that must be dereferenceable, since the receiver might - // be a raw pointer and (for `*const dyn Trait`) we don't need to - // actually access memory to resolve this method. - // Also see . - let val = self.read_immediate(&receiver)?; - break self.ref_to_mplace(&val)?; - } - ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values - ty::Dynamic(.., ty::DynStar) => { - // Not clear how to handle this, so far we assume the receiver is always a pointer. - span_bug!( - self.cur_span(), - "by-value calls on a `dyn*`... are those a thing?" - ); - } - _ => { - // Not there yet, search for the only non-ZST field. - // (The rules for `DispatchFromDyn` ensure there's exactly one such field.) - let (idx, _) = receiver.layout.non_1zst_field(self).expect( - "not exactly one non-1-ZST field in a `DispatchFromDyn` type", - ); - receiver = self.project_field(&receiver, idx)?; - } - } - }; - - // Obtain the underlying trait we are working on, and the adjusted receiver argument. - let (trait_, dyn_ty, adjusted_recv) = if let ty::Dynamic(data, _, ty::DynStar) = - receiver_place.layout.ty.kind() - { - let recv = self.unpack_dyn_star(&receiver_place, data)?; - - (data.principal(), recv.layout.ty, recv.ptr()) - } else { - // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`. - // (For that reason we also cannot use `unpack_dyn_trait`.) 
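This receiver peeling is what makes virtual calls on wrapped receivers work; the effect in surface Rust (using the stable `self: Rc<Self>` receiver form):

    use std::rc::Rc;

    trait Speak {
        // Arbitrary self type: in a virtual call on this method the receiver
        // is an `Rc<dyn Speak>`, i.e. a struct wrapping a wide pointer.
        fn speak(self: Rc<Self>) -> &'static str;
    }
    struct Dog;
    impl Speak for Dog {
        fn speak(self: Rc<Self>) -> &'static str {
            "woof"
        }
    }

    fn main() {
        // To dispatch, the `Rc` newtype must be peeled down to the pointer it
        // wraps in order to reach the vtable -- the same peeling the
        // `non_1zst_field` loop above performs field by field.
        let dog: Rc<dyn Speak> = Rc::new(Dog);
        assert_eq!(dog.speak(), "woof");
    }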
- let receiver_tail = self - .tcx - .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env); - let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else { - span_bug!( - self.cur_span(), - "dynamic call on non-`dyn` type {}", - receiver_tail - ) - }; - assert!(receiver_place.layout.is_unsized()); - - // Get the required information from the vtable. - let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?; - let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?; - - // It might be surprising that we use a pointer as the receiver even if this - // is a by-val case; this works because by-val passing of an unsized `dyn - // Trait` to a function is actually desugared to a pointer. - (receiver_trait.principal(), dyn_ty, receiver_place.ptr()) - }; - - // Now determine the actual method to call. Usually we use the easy way of just - // looking up the method at index `idx`. - let vtable_entries = self.vtable_entries(trait_, dyn_ty); - let Some(ty::VtblEntry::Method(fn_inst)) = vtable_entries.get(idx).copied() else { - // FIXME(fee1-dead) these could be variants of the UB info enum instead of this - throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method); - }; - trace!("Virtual call dispatches to {fn_inst:#?}"); - // We can also do the lookup based on `def_id` and `dyn_ty`, and check that that - // produces the same result. - if cfg!(debug_assertions) { - let tcx = *self.tcx; - - let trait_def_id = tcx.trait_of_item(def_id).unwrap(); - let virtual_trait_ref = - ty::TraitRef::from_method(tcx, trait_def_id, instance.args); - let existential_trait_ref = - ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref); - let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty); - - let concrete_method = Instance::expect_resolve_for_vtable( - tcx, - self.param_env, - def_id, - instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args), - self.cur_span(), - ); - assert_eq!(fn_inst, concrete_method); - } - - // Adjust receiver argument. Layout can be any (thin) ptr. - let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty); - args[0] = FnArg::Copy( - ImmTy::from_immediate( - Scalar::from_maybe_pointer(adjusted_recv, self).into(), - self.layout_of(receiver_ty)?, - ) - .into(), - ); - trace!("Patched receiver operand to {:#?}", args[0]); - // Need to also adjust the type in the ABI. Strangely, the layout there is actually - // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr` - // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus - // type, so we just patch this up locally. - let mut caller_fn_abi = caller_fn_abi.clone(); - caller_fn_abi.args[0].layout.ty = receiver_ty; - - // recurse with concrete function - self.eval_fn_call( - FnVal::Instance(fn_inst), - (caller_abi, &caller_fn_abi), - &args, - with_caller_location, - destination, - target, - unwind, - ) - } - } - } - - pub(crate) fn eval_fn_tail_call( - &mut self, - fn_val: FnVal<'tcx, M::ExtraFnVal>, - (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>), - args: &[FnArg<'tcx, M::Provenance>], - with_caller_location: bool, - ) -> InterpResult<'tcx> { - trace!("eval_fn_tail_call: {:#?}", fn_val); - - // This is the "canonical" implementation of tail calls, - // a pop of the current stack frame, followed by a normal call - // which pushes a new stack frame, with the return address from - // the popped stack frame.
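On nightly Rust, the surface syntax this implements is spelled `become`; a minimal sketch, assuming the unstable `explicit_tail_calls` feature gate:

    #![feature(explicit_tail_calls)]

    // `become` replaces the caller's frame instead of stacking a new one:
    // exactly the pop-then-push sequence described above.
    fn countdown(n: u64) {
        if n == 0 {
            return;
        }
        become countdown(n - 1);
    }

    fn main() {
        // Deep recursion is fine: each call reuses the popped frame's return slot.
        countdown(1_000_000);
    }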
- // - // Note that we are using `pop_stack_frame` and not `return_from_current_stack_frame`, - // as the latter "executes" the goto to the return block, but we don't want to, - // only the tail called function should return to the current return block. - M::before_stack_pop(self, self.frame())?; - - let StackPopInfo { return_action, return_to_block, return_place } = - self.pop_stack_frame(false)?; - - assert_eq!(return_action, ReturnAction::Normal); - - let StackPopCleanup::Goto { ret, unwind } = return_to_block else { - bug!("can't tailcall as root"); - }; - - // FIXME(explicit_tail_calls): - // we should check if both caller&callee can/can't unwind, - // see - - self.eval_fn_call( - fn_val, - (caller_abi, caller_fn_abi), - args, - with_caller_location, - &return_place, - ret, - unwind, - ) - } - - fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> { - // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988 - let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); - if !self.tcx.sess.target.is_like_wasm - && attrs - .target_features - .iter() - .any(|feature| !self.tcx.sess.target_features.contains(feature)) - { - throw_ub_custom!( - fluent::const_eval_unavailable_target_features_for_fn, - unavailable_feats = attrs - .target_features - .iter() - .filter(|&feature| !self.tcx.sess.target_features.contains(feature)) - .fold(String::new(), |mut s, feature| { - if !s.is_empty() { - s.push_str(", "); - } - s.push_str(feature.as_str()); - s - }), - ); - } - Ok(()) - } - - fn drop_in_place( - &mut self, - place: &PlaceTy<'tcx, M::Provenance>, - instance: ty::Instance<'tcx>, - target: mir::BasicBlock, - unwind: mir::UnwindAction, - ) -> InterpResult<'tcx> { - trace!("drop_in_place: {:?},\n instance={:?}", place, instance); - // We take the address of the object. This may well be unaligned, which is fine - // for us here. However, unaligned accesses will probably make the actual drop - // implementation fail -- a problem shared by rustc. - let place = self.force_allocation(place)?; - - // We behave a bit differently from codegen here. - // Codegen creates an `InstanceKind::Virtual` with index 0 (the slot of the drop method) and - // then dispatches that to the normal call machinery. However, our call machinery currently - // only supports calling `VtblEntry::Method`; it would choke on a `MetadataDropInPlace`. So - // instead we do the virtual call stuff ourselves. It's easier here than in `eval_fn_call` - // since we can just get a place of the underlying type and use `mplace_to_ref`. - let place = match place.layout.ty.kind() { - ty::Dynamic(data, _, ty::Dyn) => { - // Dropping a trait object. Need to find actual drop fn. - self.unpack_dyn_trait(&place, data)? - } - ty::Dynamic(data, _, ty::DynStar) => { - // Dropping a `dyn*`. Need to find actual drop fn. - self.unpack_dyn_star(&place, data)?
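Dropping through a trait object, as handled here, finds the concrete destructor via the vtable; in surface Rust:

    use std::fmt::Debug;

    #[derive(Debug)]
    struct Noisy;
    impl Drop for Noisy {
        fn drop(&mut self) {
            println!("dropping Noisy");
        }
    }

    fn main() {
        // The concrete type is erased behind `dyn Debug`; it is the vtable's
        // drop entry that locates `Noisy`'s destructor, mirroring the
        // `unpack_dyn_trait`/`unpack_dyn_star` cases above.
        let b: Box<dyn Debug> = Box::new(Noisy);
        drop(b); // prints "dropping Noisy"
    }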
- } - _ => { - debug_assert_eq!( - instance, - ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty) - ); - place - } - }; - let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); - let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; - - let arg = self.mplace_to_ref(&place)?; - let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?); - - self.eval_fn_call( - FnVal::Instance(instance), - (Abi::Rust, fn_abi), - &[FnArg::Copy(arg.into())], - false, - &ret.into(), - Some(target), - unwind, - ) - } -} -- cgit 1.4.1-3-g733a5 From 5783e73f4662595ccb3261c3cb42cdc45eb157e0 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 5 Aug 2024 21:05:51 +0200 Subject: make some Frame fields more private --- compiler/rustc_const_eval/src/interpret/stack.rs | 16 ++++++++++++---- src/tools/miri/src/concurrency/thread.rs | 4 ---- src/tools/miri/src/helpers.rs | 10 +++++----- src/tools/miri/src/machine.rs | 8 ++++---- src/tools/miri/src/shims/backtrace.rs | 4 ++-- src/tools/miri/src/shims/panic.rs | 4 ++-- 6 files changed, 25 insertions(+), 21 deletions(-) (limited to 'compiler/rustc_const_eval') diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs index b5f55434d5a..5733e2316fd 100644 --- a/compiler/rustc_const_eval/src/interpret/stack.rs +++ b/compiler/rustc_const_eval/src/interpret/stack.rs @@ -61,10 +61,10 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> { // Function and callsite information //////////////////////////////////////////////////////////////////////////////// /// The MIR for the function called on this frame. - pub body: &'tcx mir::Body<'tcx>, + pub(super) body: &'tcx mir::Body<'tcx>, /// The def_id and args of the current function. - pub instance: ty::Instance<'tcx>, + pub(super) instance: ty::Instance<'tcx>, /// Extra data for the machine. pub extra: Extra, @@ -73,7 +73,7 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> { // Return place and locals //////////////////////////////////////////////////////////////////////////////// /// Work to perform when returning from this function. - pub return_to_block: StackPopCleanup, + return_to_block: StackPopCleanup, /// The location where the result of the current stack frame should be written to, /// and its layout in the caller. @@ -101,7 +101,7 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> { /// frames without cleanup code). /// /// Needs to be public because ConstProp does unspeakable things to it. - pub loc: Either<mir::Location, Span>, + pub(super) loc: Either<mir::Location, Span>, } #[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these @@ -269,6 +269,14 @@ impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> { self.loc } + pub fn body(&self) -> &'tcx mir::Body<'tcx> { + self.body + } + + pub fn instance(&self) -> ty::Instance<'tcx> { + self.instance + } + /// Return the `SourceInfo` of the current instruction.
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> { self.loc.left().map(|loc| self.body.source_info(loc)) diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs index afa91df78f9..f72591f0c4b 100644 --- a/src/tools/miri/src/concurrency/thread.rs +++ b/src/tools/miri/src/concurrency/thread.rs @@ -377,10 +377,6 @@ impl VisitProvenance for Frame<'_, Provenance, FrameExtra<'_>> { return_place, locals, extra, - body: _, - instance: _, - return_to_block: _, - loc: _, // There are some private fields we cannot access; they contain no tags. .. } = self; diff --git a/src/tools/miri/src/helpers.rs b/src/tools/miri/src/helpers.rs index 5c89889506f..1bdf9f06dcd 100644 --- a/src/tools/miri/src/helpers.rs +++ b/src/tools/miri/src/helpers.rs @@ -1105,12 +1105,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // Make an attempt to get at the instance of the function this is inlined from. let instance: Option<_> = try { let scope = frame.current_source_info()?.scope; - let inlined_parent = frame.body.source_scopes[scope].inlined_parent_scope?; - let source = &frame.body.source_scopes[inlined_parent]; + let inlined_parent = frame.body().source_scopes[scope].inlined_parent_scope?; + let source = &frame.body().source_scopes[inlined_parent]; source.inlined.expect("inlined_parent_scope points to scope without inline info").0 }; // Fall back to the instance of the function itself. - let instance = instance.unwrap_or(frame.instance); + let instance = instance.unwrap_or(frame.instance()); // Now check the crate it is in. We could try to be clever here and e.g. check if this is // the same crate as `start_fn`, but that would not work for running std tests in Miri, so // we'd need some more hacks anyway. So we just check the name of the crate. If someone @@ -1350,9 +1350,9 @@ impl<'tcx> MiriMachine<'tcx> { /// This is the source of truth for the `is_user_relevant` flag in our `FrameExtra`. pub fn is_user_relevant(&self, frame: &Frame<'tcx, Provenance>) -> bool { - let def_id = frame.instance.def_id(); + let def_id = frame.instance().def_id(); (def_id.is_local() || self.local_crates.contains(&def_id.krate)) - && !frame.instance.def.requires_caller_location(self.tcx) + && !frame.instance().def.requires_caller_location(self.tcx) } } diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs index 3e45a3a7e1a..94598e7d2e3 100644 --- a/src/tools/miri/src/machine.rs +++ b/src/tools/miri/src/machine.rs @@ -1352,7 +1352,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> { // Start recording our event before doing anything else let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() { - let fn_name = frame.instance.to_string(); + let fn_name = frame.instance().to_string(); let entry = ecx.machine.string_cache.entry(fn_name.clone()); let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name)); @@ -1443,7 +1443,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { // tracing-tree can automatically annotate scope changes, but it gets very confused by our // concurrency and what it prints is just plain wrong. So we print our own information // instead. (Cc https://github.com/rust-lang/miri/issues/2266) - info!("Leaving {}", ecx.frame().instance); + info!("Leaving {}", ecx.frame().instance()); Ok(()) } @@ -1473,7 +1473,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { // Needs to be done after dropping frame to show up on the right nesting level.
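Downstream, the privatization is a mechanical change: Miri now reads `frame.body()` and `frame.instance()` instead of the raw fields, as the hunks above and below show. A minimal sketch of the pattern (the `Frame`/`depth` names here are invented for illustration):

    mod interp {
        pub struct Frame {
            pub(crate) depth: usize, // was `pub`: now only this crate can touch it
        }
        impl Frame {
            // Read-only accessor: external users keep access, lose mutation.
            pub fn depth(&self) -> usize {
                self.depth
            }
        }
    }

    fn main() {
        let frame = interp::Frame { depth: 3 };
        assert_eq!(frame.depth(), 3);
    }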
// (Cc https://github.com/rust-lang/miri/issues/2266) if !ecx.active_thread_stack().is_empty() { - info!("Continuing in {}", ecx.frame().instance); + info!("Continuing in {}", ecx.frame().instance()); } res } @@ -1486,7 +1486,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else { panic!("after_local_allocated should only be called on fresh allocations"); }; - let local_decl = &ecx.frame().body.local_decls[local]; + let local_decl = &ecx.frame().body().local_decls[local]; let span = local_decl.source_info.span; ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None)); Ok(()) diff --git a/src/tools/miri/src/shims/backtrace.rs b/src/tools/miri/src/shims/backtrace.rs index 42babb4c78d..9bb6777a9b0 100644 --- a/src/tools/miri/src/shims/backtrace.rs +++ b/src/tools/miri/src/shims/backtrace.rs @@ -46,8 +46,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let mut data = Vec::new(); for frame in this.active_thread_stack().iter().rev() { // Match behavior of debuginfo (`FunctionCx::adjusted_span_and_dbg_scope`). - let span = hygiene::walk_chain_collapsed(frame.current_span(), frame.body.span); - data.push((frame.instance, span.lo())); + let span = hygiene::walk_chain_collapsed(frame.current_span(), frame.body().span); + data.push((frame.instance(), span.lo())); } let ptrs: Vec<_> = data diff --git a/src/tools/miri/src/shims/panic.rs b/src/tools/miri/src/shims/panic.rs index 2fb0319a843..ab705ddccab 100644 --- a/src/tools/miri/src/shims/panic.rs +++ b/src/tools/miri/src/shims/panic.rs @@ -48,7 +48,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { fn handle_miri_start_unwind(&mut self, payload: &OpTy<'tcx>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - trace!("miri_start_unwind: {:?}", this.frame().instance); + trace!("miri_start_unwind: {:?}", this.frame().instance()); let payload = this.read_immediate(payload)?; let thread = this.active_thread_mut(); @@ -124,7 +124,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // and we are unwinding, so we should catch that. trace!( "unwinding: found catch_panic frame during unwinding: {:?}", - this.frame().instance + this.frame().instance() ); // We set the return value of `try` to 1, since there was a panic. -- cgit 1.4.1-3-g733a5 From 1c2705c6229c16d68c8769e570b5500a123aebfb Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 6 Aug 2024 13:38:54 +0200 Subject: various cleanups based on review --- compiler/rustc_const_eval/src/interpret/call.rs | 31 +++++------ compiler/rustc_const_eval/src/interpret/stack.rs | 9 +++- compiler/rustc_const_eval/src/interpret/step.rs | 69 ++++++++++++------------ 3 files changed, 54 insertions(+), 55 deletions(-) (limited to 'compiler/rustc_const_eval') diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs index 9d2870a4a31..2c5147678e8 100644 --- a/compiler/rustc_const_eval/src/interpret/call.rs +++ b/compiler/rustc_const_eval/src/interpret/call.rs @@ -418,7 +418,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // The "where they come from" part is easy, we expect the caller to do any special handling // that might be required here (e.g. for untupling). // If `with_caller_location` is set we pretend there is an extra argument (that - // we will not pass). + // we will not pass; our `caller_location` intrinsic implementation walks the stack instead). 
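The `caller_location` machinery referenced in that comment surfaces in Rust as `#[track_caller]`; a standalone illustration:

    #[track_caller]
    fn where_am_i() -> &'static std::panic::Location<'static> {
        // Functions without a real caller-location argument can still answer
        // this: the interpreter reconstructs the location by walking the
        // stack, as the comment above notes.
        std::panic::Location::caller()
    }

    fn main() {
        let loc = where_am_i();
        println!("called from {}:{}", loc.file(), loc.line());
    }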
assert_eq!( args.len() + if with_caller_location { 1 } else { 0 }, caller_fn_abi.args.len(), "mismatch between caller ABI and caller arguments", ); @@ -502,14 +502,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Don't forget to mark "initially live" locals as live. self.storage_live_for_always_live_locals()?; }; - match res { - Err(err) => { - // Don't show the incomplete stack frame in the error stacktrace. - self.stack_mut().pop(); - Err(err) - } - Ok(()) => Ok(()), - } + res.inspect_err(|_| { + // Don't show the incomplete stack frame in the error stacktrace. + self.stack_mut().pop(); + }) } /// Initiate a call to this function -- pushing the stack frame and initializing the arguments. @@ -907,7 +903,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { .local_to_op(mir::RETURN_PLACE, None) .expect("return place should always be live"); let dest = self.frame().return_place.clone(); - let err = if self.stack().len() == 1 { + let res = if self.stack().len() == 1 { // The initializer of constants and statics will get validated separately // after the constant has been fully evaluated. While we could fall back to the default // code path, that will cause -Zenforce-validity to cycle on static initializers. @@ -924,7 +920,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // We delay actually short-circuiting on this error until *after* the stack frame is // popped, since we want this error to be attributed to the caller, whose type defines // this transmute. - err + res } else { Ok(()) }; @@ -953,14 +949,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Normal return, figure out where to jump. if unwinding { // Follow the unwind edge. - let unwind = match stack_pop_info.return_to_block { - StackPopCleanup::Goto { unwind, .. } => unwind, + match stack_pop_info.return_to_block { + StackPopCleanup::Goto { unwind, .. } => { + // This must be the very last thing that happens, since it can in fact push a new stack frame. + self.unwind_to_block(unwind) + } StackPopCleanup::Root { .. } => { panic!("encountered StackPopCleanup::Root when unwinding!") } - }; - // This must be the very last thing that happens, since it can in fact push a new stack frame. - self.unwind_to_block(unwind) + } } else { // Follow the normal return edge. match stack_pop_info.return_to_block { @@ -968,7 +965,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { StackPopCleanup::Root { .. } => { assert!( self.stack().is_empty(), - "only the topmost frame can have StackPopCleanup::Root" + "only the bottommost frame can have StackPopCleanup::Root" ); Ok(()) } diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs index 5733e2316fd..50dbced6a2a 100644 --- a/compiler/rustc_const_eval/src/interpret/stack.rs +++ b/compiler/rustc_const_eval/src/interpret/stack.rs @@ -264,7 +264,7 @@ impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> { /// this frame (can happen e.g. during frame initialization, and during unwinding on /// frames without cleanup code). /// - /// Used by priroda. + /// Used by [priroda](https://github.com/oli-obk/priroda). pub fn current_loc(&self) -> Either<mir::Location, Span> { self.loc } @@ -340,6 +340,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Very low-level helper that pushes a stack frame without initializing /// the arguments or local variables. /// + /// The high-level version of this is `init_stack_frame`.
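`Result::inspect_err` (stable since Rust 1.76) is what lets that cleanup read linearly; the same pattern in miniature:

    fn parse_or_log(s: &str) -> Result<i32, std::num::ParseIntError> {
        s.parse::<i32>().inspect_err(|e| {
            // Side effect on the error path only; the `Err` still propagates,
            // just like popping the incomplete frame before returning the error.
            eprintln!("parse failed: {e}");
        })
    }

    fn main() {
        assert_eq!(parse_or_log("42"), Ok(42));
        assert!(parse_or_log("nope").is_err());
    }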
#[instrument(skip(self, body, return_place, return_to_block), level = "debug")] pub(crate) fn push_stack_frame_raw( &mut self, @@ -392,13 +394,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { Ok(()) } - /// Pops a stack frame from the stack and returns some information about it. + /// Low-level helper that pops a stack frame from the stack and returns some information about + /// it. /// /// This also deallocates locals, if necessary. /// /// [`M::before_stack_pop`] should be called before calling this function. /// [`M::after_stack_pop`] is called by this function automatically. /// + /// The high-level version of this is `return_from_current_stack_frame`. + /// /// [`M::before_stack_pop`]: Machine::before_stack_pop /// [`M::after_stack_pop`]: Machine::after_stack_pop pub(super) fn pop_stack_frame_raw( diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index 654c1076545..2527eca3446 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -369,44 +369,38 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } /// Evaluate the arguments of a function call - fn eval_fn_call_arguments( + fn eval_fn_call_argument( &self, - ops: &[Spanned<mir::Operand<'tcx>>], - ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> { - ops.iter() - .map(|op| { - let arg = match &op.node { - mir::Operand::Copy(_) | mir::Operand::Constant(_) => { - // Make a regular copy. - let op = self.eval_operand(&op.node, None)?; + op: &mir::Operand<'tcx>, + ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> { + Ok(match op { + mir::Operand::Copy(_) | mir::Operand::Constant(_) => { + // Make a regular copy. + let op = self.eval_operand(op, None)?; + FnArg::Copy(op) + } + mir::Operand::Move(place) => { + // If this place lives in memory, preserve its location. + // We call `place_to_op` which will be an `MPlaceTy` whenever there exists + // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local` + // which can return a local even if that has an mplace.) + let place = self.eval_place(*place)?; + let op = self.place_to_op(&place)?; + + match op.as_mplace_or_imm() { + Either::Left(mplace) => FnArg::InPlace(mplace), + Either::Right(_imm) => { + // This argument doesn't live in memory, so there's no place + // to make inaccessible during the call. + // We rely on there not being any stray `PlaceTy` that would let the + // caller directly access this local! + // This is also crucial for tail calls, where we want the `FnArg` to + // stay valid when the old stack frame gets popped.
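At the surface level, the `Move` case corresponds to passing a local by value: the caller's slot is dead after the call, so the interpreter is free to hand the memory over in place. A small illustration:

    fn consume(v: Vec<i32>) -> usize {
        v.len()
    }

    fn main() {
        let v = vec![1, 2, 3];
        // `v` becomes a Move operand in MIR: the caller may never touch it
        // again, so its storage can be protected and reused for the callee's
        // argument instead of being copied.
        let n = consume(v);
        assert_eq!(n, 3);
    }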
- FnArg::Copy(op) - } - } - } - }; - - Ok(arg) - }) - .collect() + } + } + }) } /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the @@ -418,7 +412,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { args: &[Spanned<mir::Operand<'tcx>>], ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> { let func = self.eval_operand(func, None)?; - let args = self.eval_fn_call_arguments(args)?; + let args = args + .iter() + .map(|arg| self.eval_fn_call_argument(&arg.node)) + .collect::<InterpResult<'tcx, Vec<_>>>()?; let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx); let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder); -- cgit 1.4.1-3-g733a5
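The per-argument refactor leans on `collect` gathering an iterator of results into a result of a collection, short-circuiting on the first error -- the same idiom standalone:

    fn main() {
        // All succeed: we get Ok(vec![...]).
        let ok: Result<Vec<i32>, _> =
            ["1", "2", "3"].iter().map(|s| s.parse::<i32>()).collect();
        assert_eq!(ok, Ok(vec![1, 2, 3]));
        // The first failure short-circuits the whole collect, matching how
        // argument evaluation bails out on the first operand that fails.
        let err: Result<Vec<i32>, _> =
            ["1", "x", "3"].iter().map(|s| s.parse::<i32>()).collect();
        assert!(err.is_err());
    }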