Diffstat (limited to 'compiler/rustc_const_eval/src')
28 files changed, 456 insertions, 294 deletions
diff --git a/compiler/rustc_const_eval/src/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs index 0eb6f28bdb3..ca707b50d50 100644 --- a/compiler/rustc_const_eval/src/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/check_consts/check.rs @@ -6,7 +6,6 @@ use std::mem; use std::num::NonZero; use std::ops::Deref; -use rustc_attr_data_structures as attrs; use rustc_errors::{Diag, ErrorGuaranteed}; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; @@ -466,7 +465,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { /// Check the const stability of the given item (fn or trait). fn check_callee_stability(&mut self, def_id: DefId) { match self.tcx.lookup_const_stability(def_id) { - Some(attrs::ConstStability { level: attrs::StabilityLevel::Stable { .. }, .. }) => { + Some(hir::ConstStability { level: hir::StabilityLevel::Stable { .. }, .. }) => { // All good. } None => { @@ -482,8 +481,8 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { }); } } - Some(attrs::ConstStability { - level: attrs::StabilityLevel::Unstable { implied_by: implied_feature, issue, .. }, + Some(hir::ConstStability { + level: hir::StabilityLevel::Unstable { implied_by: implied_feature, issue, .. }, feature, .. }) => { @@ -714,10 +713,10 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) { self.super_operand(op, location); - if let Operand::Constant(c) = op { - if let Some(def_id) = c.check_static_ptr(self.tcx) { - self.check_static(def_id, self.span); - } + if let Operand::Constant(c) = op + && let Some(def_id) = c.check_static_ptr(self.tcx) + { + self.check_static(def_id, self.span); } } @@ -784,7 +783,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { self.revalidate_conditional_constness(callee, fn_args, *fn_span); // Attempting to call a trait method? - if let Some(trait_did) = tcx.trait_of_item(callee) { + if let Some(trait_did) = tcx.trait_of_assoc(callee) { // We can't determine the actual callee here, so we have to do different checks // than usual. @@ -828,7 +827,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { // At this point, we are calling a function, `callee`, whose `DefId` is known... - // `begin_panic` and `#[rustc_const_panic_str]` functions accept generic + // `begin_panic` and `panic_display` functions accept generic // types other than str. Check to enforce that only str can be used in // const-eval. @@ -842,8 +841,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { return; } - // const-eval of `#[rustc_const_panic_str]` functions assumes the argument is `&&str` - if tcx.has_attr(callee, sym::rustc_const_panic_str) { + // const-eval of `panic_display` assumes the argument is `&&str` + if tcx.is_lang_item(callee, LangItem::PanicDisplay) { match args[0].node.ty(&self.ccx.body.local_decls, tcx).kind() { ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) => {} @@ -891,8 +890,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { }); } } - Some(attrs::ConstStability { - level: attrs::StabilityLevel::Unstable { .. }, + Some(hir::ConstStability { + level: hir::StabilityLevel::Unstable { .. }, feature, .. }) => { @@ -902,8 +901,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { const_stable_indirect: is_const_stable, }); } - Some(attrs::ConstStability { - level: attrs::StabilityLevel::Stable { .. }, + Some(hir::ConstStability { + level: hir::StabilityLevel::Stable { .. }, .. }) => { // All good. 
Note that a `#[rustc_const_stable]` intrinsic (meaning it diff --git a/compiler/rustc_const_eval/src/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs index ebf18c6f2ac..4a88d039ef3 100644 --- a/compiler/rustc_const_eval/src/check_consts/mod.rs +++ b/compiler/rustc_const_eval/src/check_consts/mod.rs @@ -5,11 +5,12 @@ //! it finds operations that are invalid in a certain context. use rustc_errors::DiagCtxtHandle; +use rustc_hir::attrs::AttributeKind; use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::{self as hir, find_attr}; use rustc_middle::ty::{self, PolyFnSig, TyCtxt}; use rustc_middle::{bug, mir}; use rustc_span::Symbol; -use {rustc_attr_data_structures as attrs, rustc_hir as hir}; pub use self::qualifs::Qualif; @@ -82,7 +83,7 @@ pub fn rustc_allow_const_fn_unstable( ) -> bool { let attrs = tcx.hir_attrs(tcx.local_def_id_to_hir_id(def_id)); - attrs::find_attr!(attrs, attrs::AttributeKind::AllowConstFnUnstable(syms, _) if syms.contains(&feature_gate)) + find_attr!(attrs, AttributeKind::AllowConstFnUnstable(syms, _) if syms.contains(&feature_gate)) } /// Returns `true` if the given `def_id` (trait or function) is "safe to expose on stable". diff --git a/compiler/rustc_const_eval/src/check_consts/ops.rs b/compiler/rustc_const_eval/src/check_consts/ops.rs index b2e0577cc82..79e32dcf105 100644 --- a/compiler/rustc_const_eval/src/check_consts/ops.rs +++ b/compiler/rustc_const_eval/src/check_consts/ops.rs @@ -142,7 +142,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { |err, self_ty, trait_id| { // FIXME(const_trait_impl): Do we need any of this on the non-const codepath? - let trait_ref = TraitRef::from_method(tcx, trait_id, self.args); + let trait_ref = TraitRef::from_assoc(tcx, trait_id, self.args); match self_ty.kind() { Param(param_ty) => { @@ -295,21 +295,18 @@ fn build_error_for_const_call<'tcx>( } let deref = "*".repeat(num_refs); - if let Ok(call_str) = ccx.tcx.sess.source_map().span_to_snippet(span) { - if let Some(eq_idx) = call_str.find("==") { - if let Some(rhs_idx) = - call_str[(eq_idx + 2)..].find(|c: char| !c.is_whitespace()) - { - let rhs_pos = - span.lo() + BytePos::from_usize(eq_idx + 2 + rhs_idx); - let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos); - sugg = Some(errors::ConsiderDereferencing { - deref, - span: span.shrink_to_lo(), - rhs_span, - }); - } - } + if let Ok(call_str) = ccx.tcx.sess.source_map().span_to_snippet(span) + && let Some(eq_idx) = call_str.find("==") + && let Some(rhs_idx) = + call_str[(eq_idx + 2)..].find(|c: char| !c.is_whitespace()) + { + let rhs_pos = span.lo() + BytePos::from_usize(eq_idx + 2 + rhs_idx); + let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos); + sugg = Some(errors::ConsiderDereferencing { + deref, + span: span.shrink_to_lo(), + rhs_span, + }); } } _ => {} diff --git a/compiler/rustc_const_eval/src/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/check_consts/qualifs.rs index 166491b47a1..faf41f1658b 100644 --- a/compiler/rustc_const_eval/src/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/check_consts/qualifs.rs @@ -369,7 +369,7 @@ where assert!(promoted.is_none() || Q::ALLOW_PROMOTED); // Don't peek inside trait associated constants. 
- if promoted.is_none() && cx.tcx.trait_of_item(def).is_none() { + if promoted.is_none() && cx.tcx.trait_of_assoc(def).is_none() { let qualifs = cx.tcx.at(constant.span).mir_const_qualif(def); if !Q::in_qualifs(&qualifs) { diff --git a/compiler/rustc_const_eval/src/check_consts/resolver.rs b/compiler/rustc_const_eval/src/check_consts/resolver.rs index 9f7fcc509a5..d98e5027e4d 100644 --- a/compiler/rustc_const_eval/src/check_consts/resolver.rs +++ b/compiler/rustc_const_eval/src/check_consts/resolver.rs @@ -137,14 +137,14 @@ where // If a local with no projections is moved from (e.g. `x` in `y = x`), record that // it no longer needs to be dropped. - if let mir::Operand::Move(place) = operand { - if let Some(local) = place.as_local() { - // For backward compatibility with the MaybeMutBorrowedLocals used in an earlier - // implementation we retain qualif if a local had been borrowed before. This might - // not be strictly necessary since the local is no longer initialized. - if !self.state.borrow.contains(local) { - self.state.qualif.remove(local); - } + if let mir::Operand::Move(place) = operand + && let Some(local) = place.as_local() + { + // For backward compatibility with the MaybeMutBorrowedLocals used in an earlier + // implementation we retain qualif if a local had been borrowed before. This might + // not be strictly necessary since the local is no longer initialized. + if !self.state.borrow.contains(local) { + self.state.qualif.remove(local); } } } diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index f584f6c948e..4da2663319c 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -117,6 +117,13 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>( ecx.tcx.dcx().emit_err(errors::ConstHeapPtrInFinal { span: ecx.tcx.span }), ))); } + Err(InternError::PartialPointer) => { + throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error( + ecx.tcx + .dcx() + .emit_err(errors::PartialPtrInFinal { span: ecx.tcx.span, kind: intern_kind }), + ))); + } } interp_ok(R::make_result(ret, ecx)) @@ -152,7 +159,7 @@ pub(crate) fn mk_eval_cx_to_read_const_val<'tcx>( pub fn mk_eval_cx_for_const_val<'tcx>( tcx: TyCtxtAt<'tcx>, typing_env: ty::TypingEnv<'tcx>, - val: mir::ConstValue<'tcx>, + val: mir::ConstValue, ty: Ty<'tcx>, ) -> Option<(CompileTimeInterpCx<'tcx>, OpTy<'tcx>)> { let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, typing_env, CanAccessMutGlobal::No); @@ -172,7 +179,7 @@ pub(super) fn op_to_const<'tcx>( ecx: &CompileTimeInterpCx<'tcx>, op: &OpTy<'tcx>, for_diagnostics: bool, -) -> ConstValue<'tcx> { +) -> ConstValue { // Handle ZST consistently and early. 
if op.layout.is_zst() { return ConstValue::ZeroSized; @@ -241,10 +248,9 @@ pub(super) fn op_to_const<'tcx>( let (prov, offset) = ptr.into_pointer_or_addr().expect(msg).prov_and_relative_offset(); let alloc_id = prov.alloc_id(); - let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory(); assert!(offset == abi::Size::ZERO, "{}", msg); let meta = b.to_target_usize(ecx).expect(msg); - ConstValue::Slice { data, meta } + ConstValue::Slice { alloc_id, meta } } Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty), }, @@ -256,7 +262,7 @@ pub(crate) fn turn_into_const_value<'tcx>( tcx: TyCtxt<'tcx>, constant: ConstAlloc<'tcx>, key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>, -) -> ConstValue<'tcx> { +) -> ConstValue { let cid = key.value; let def_id = cid.instance.def.def_id(); let is_static = tcx.is_static(def_id); diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index a18ae79f318..da954cf4ed7 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -237,7 +237,7 @@ impl<'tcx> CompileTimeInterpCx<'tcx> { ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> { let def_id = instance.def_id(); - if self.tcx.has_attr(def_id, sym::rustc_const_panic_str) + if self.tcx.is_lang_item(def_id, LangItem::PanicDisplay) || self.tcx.is_lang_item(def_id, LangItem::BeginPanic) { let args = self.copy_fn_args(args); diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs index 0082f90f3b8..624ca1dd2da 100644 --- a/compiler/rustc_const_eval/src/const_eval/mod.rs +++ b/compiler/rustc_const_eval/src/const_eval/mod.rs @@ -28,7 +28,7 @@ const VALTREE_MAX_NODES: usize = 100000; #[instrument(skip(tcx), level = "debug")] pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>( tcx: TyCtxt<'tcx>, - val: mir::ConstValue<'tcx>, + val: mir::ConstValue, ty: Ty<'tcx>, ) -> Option<mir::DestructuredConstant<'tcx>> { let typing_env = ty::TypingEnv::fully_monomorphized(); diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index 5ab72c853c4..37c6c4a61d8 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -259,7 +259,7 @@ pub fn valtree_to_const_value<'tcx>( tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, cv: ty::Value<'tcx>, -) -> mir::ConstValue<'tcx> { +) -> mir::ConstValue { // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s // (those for constants with type bool, int, uint, float or char). // For all other types we create an `MPlace` and fill that by walking diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index b6a64035261..2d412ee5ec2 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -52,6 +52,15 @@ pub(crate) struct ConstHeapPtrInFinal { } #[derive(Diagnostic)] +#[diag(const_eval_partial_pointer_in_final)] +#[note] +pub(crate) struct PartialPtrInFinal { + #[primary_span] + pub span: Span, + pub kind: InternKind, +} + +#[derive(Diagnostic)] #[diag(const_eval_unstable_in_stable_exposed)] pub(crate) struct UnstableInStableExposed { pub gate: String, @@ -500,7 +509,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> { InvalidNichedEnumVariantWritten { .. 
} => { const_eval_invalid_niched_enum_variant_written } - AbiMismatchArgument { .. } => const_eval_incompatible_types, + AbiMismatchArgument { .. } => const_eval_incompatible_arg_types, AbiMismatchReturn { .. } => const_eval_incompatible_return_types, } } @@ -625,12 +634,16 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> { diag.arg("data_size", info.data_size); } InvalidNichedEnumVariantWritten { enum_ty } => { - diag.arg("ty", enum_ty.to_string()); + diag.arg("ty", enum_ty); + } + AbiMismatchArgument { arg_idx, caller_ty, callee_ty } => { + diag.arg("arg_idx", arg_idx + 1); // adjust for 1-indexed lists in output + diag.arg("caller_ty", caller_ty); + diag.arg("callee_ty", callee_ty); } - AbiMismatchArgument { caller_ty, callee_ty } - | AbiMismatchReturn { caller_ty, callee_ty } => { - diag.arg("caller_ty", caller_ty.to_string()); - diag.arg("callee_ty", callee_ty.to_string()); + AbiMismatchReturn { caller_ty, callee_ty } => { + diag.arg("caller_ty", caller_ty); + diag.arg("callee_ty", callee_ty); } } } @@ -832,8 +845,7 @@ impl ReportErrorExt for UnsupportedOpInfo { UnsupportedOpInfo::Unsupported(s) => s.clone().into(), UnsupportedOpInfo::ExternTypeField => const_eval_extern_type_field, UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local, - UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite, - UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy, + UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_read, UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int, UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static, UnsupportedOpInfo::ExternStatic(_) => const_eval_extern_static, @@ -844,7 +856,7 @@ impl ReportErrorExt for UnsupportedOpInfo { use UnsupportedOpInfo::*; use crate::fluent_generated::*; - if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self { + if let ReadPointerAsInt(_) | ReadPartialPointer(_) = self { diag.help(const_eval_ptr_as_bytes_1); diag.help(const_eval_ptr_as_bytes_2); } @@ -856,7 +868,7 @@ impl ReportErrorExt for UnsupportedOpInfo { | UnsupportedOpInfo::ExternTypeField | Unsupported(_) | ReadPointerAsInt(_) => {} - OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => { + ReadPartialPointer(ptr) => { diag.arg("ptr", ptr); } ThreadLocalStatic(did) | ExternStatic(did) => rustc_middle::ty::tls::with(|tcx| { diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs index 1503f3bcd99..64cb934ac8d 100644 --- a/compiler/rustc_const_eval/src/interpret/call.rs +++ b/compiler/rustc_const_eval/src/interpret/call.rs @@ -11,6 +11,7 @@ use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef}; use rustc_middle::{bug, mir, span_bug}; use rustc_span::sym; use rustc_target::callconv::{ArgAbi, FnAbi, PassMode}; +use tracing::field::Empty; use tracing::{info, instrument, trace}; use super::{ @@ -18,7 +19,8 @@ use super::{ Projectable, Provenance, ReturnAction, ReturnContinuation, Scalar, StackPopInfo, interp_ok, throw_ub, throw_ub_custom, throw_unsup_format, }; -use crate::fluent_generated as fluent; +use crate::interpret::EnteredTraceSpan; +use crate::{enter_trace_span, fluent_generated as fluent}; /// An argument passed to a function. 
#[derive(Clone, Debug)] @@ -270,6 +272,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>), >, callee_abi: &ArgAbi<'tcx, Ty<'tcx>>, + callee_arg_idx: usize, callee_arg: &mir::Place<'tcx>, callee_ty: Ty<'tcx>, already_live: bool, @@ -298,6 +301,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Check compatibility if !self.check_argument_compat(caller_abi, callee_abi)? { throw_ub!(AbiMismatchArgument { + arg_idx: callee_arg_idx, caller_ty: caller_abi.layout.ty, callee_ty: callee_abi.layout.ty }); @@ -342,6 +346,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { destination: &PlaceTy<'tcx, M::Provenance>, mut cont: ReturnContinuation, ) -> InterpResult<'tcx> { + let _trace = enter_trace_span!(M, step::init_stack_frame, %instance, tracing_separate_thread = Empty); + // Compute callee information. // FIXME: for variadic support, do we have to somehow determine callee's extra_args? let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; @@ -424,7 +430,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // this is a single iterator (that handles `spread_arg`), then // `pass_argument` would be the loop body. It takes care to // not advance `caller_iter` for ignored arguments. - let mut callee_args_abis = callee_fn_abi.args.iter(); + let mut callee_args_abis = callee_fn_abi.args.iter().enumerate(); for local in body.args_iter() { // Construct the destination place for this argument. At this point all // locals are still dead, so we cannot construct a `PlaceTy`. @@ -445,10 +451,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { &[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)], *self.tcx, ); - let callee_abi = callee_args_abis.next().unwrap(); + let (idx, callee_abi) = callee_args_abis.next().unwrap(); self.pass_argument( &mut caller_args, callee_abi, + idx, &dest, field_ty, /* already_live */ true, @@ -456,10 +463,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } } else { // Normal argument. Cannot mark it as live yet, it might be unsized! 
- let callee_abi = callee_args_abis.next().unwrap(); + let (idx, callee_abi) = callee_args_abis.next().unwrap(); self.pass_argument( &mut caller_args, callee_abi, + idx, &dest, ty, /* already_live */ false, @@ -519,7 +527,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { target: Option<mir::BasicBlock>, unwind: mir::UnwindAction, ) -> InterpResult<'tcx> { - trace!("init_fn_call: {:#?}", fn_val); + let _trace = + enter_trace_span!(M, step::init_fn_call, tracing_separate_thread = Empty, ?fn_val) + .or_if_tracing_disabled(|| trace!("init_fn_call: {:#?}", fn_val)); let instance = match fn_val { FnVal::Instance(instance) => instance, @@ -721,18 +731,21 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { ) { let tcx = *self.tcx; - let trait_def_id = tcx.trait_of_item(def_id).unwrap(); - let virtual_trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, virtual_instance.args); + let trait_def_id = tcx.parent(def_id); + let virtual_trait_ref = ty::TraitRef::from_assoc(tcx, trait_def_id, virtual_instance.args); let existential_trait_ref = ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref); let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty); - let concrete_method = Instance::expect_resolve_for_vtable( - tcx, - self.typing_env, - def_id, - virtual_instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args), - self.cur_span(), - ); + let concrete_method = { + let _trace = enter_trace_span!(M, resolve::expect_resolve_for_vtable, ?def_id); + Instance::expect_resolve_for_vtable( + tcx, + self.typing_env, + def_id, + virtual_instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args), + self.cur_span(), + ) + }; assert_eq!(concrete_instance, concrete_method); } @@ -815,7 +828,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { place } }; - let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); + let instance = { + let _trace = + enter_trace_span!(M, resolve::resolve_drop_in_place, ty = ?place.layout.ty); + ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty) + }; let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?; let arg = self.mplace_to_ref(&place)?; diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs index de4fbc7b475..e3afeda5b7c 100644 --- a/compiler/rustc_const_eval/src/interpret/cast.rs +++ b/compiler/rustc_const_eval/src/interpret/cast.rs @@ -16,8 +16,8 @@ use super::{ FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy, err_inval, interp_ok, throw_ub, throw_ub_custom, }; -use crate::fluent_generated as fluent; use crate::interpret::Writeable; +use crate::{enter_trace_span, fluent_generated as fluent}; impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { pub fn cast( @@ -81,13 +81,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // The src operand does not matter, just its type match *src.layout.ty.kind() { ty::FnDef(def_id, args) => { - let instance = ty::Instance::resolve_for_fn_ptr( - *self.tcx, - self.typing_env, - def_id, - args, - ) - .ok_or_else(|| err_inval!(TooGeneric))?; + let instance = { + let _trace = enter_trace_span!(M, resolve::resolve_for_fn_ptr, ?def_id); + ty::Instance::resolve_for_fn_ptr( + *self.tcx, + self.typing_env, + def_id, + args, + ) + .ok_or_else(|| err_inval!(TooGeneric))? 
+ }; let fn_ptr = self.fn_ptr(FnVal::Instance(instance)); self.write_pointer(fn_ptr, dest)?; @@ -114,12 +117,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // The src operand does not matter, just its type match *src.layout.ty.kind() { ty::Closure(def_id, args) => { - let instance = ty::Instance::resolve_closure( - *self.tcx, - def_id, - args, - ty::ClosureKind::FnOnce, - ); + let instance = { + let _trace = enter_trace_span!(M, resolve::resolve_closure, ?def_id); + ty::Instance::resolve_closure( + *self.tcx, + def_id, + args, + ty::ClosureKind::FnOnce, + ) + }; let fn_ptr = self.fn_ptr(FnVal::Instance(instance)); self.write_pointer(fn_ptr, dest)?; } diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 11e7706fe60..a8a1ac1c980 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -113,7 +113,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// See [LayoutOf::layout_of] for the original documentation. #[inline(always)] pub fn layout_of(&self, ty: Ty<'tcx>) -> <Self as LayoutOfHelpers<'tcx>>::LayoutOfResult { - let _span = enter_trace_span!(M, "InterpCx::layout_of", ty = ?ty.kind()); + let _trace = enter_trace_span!(M, layouting::layout_of, ty = ?ty.kind()); LayoutOf::layout_of(self, ty) } @@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>>, ) -> <Self as FnAbiOfHelpers<'tcx>>::FnAbiOfResult { - let _span = enter_trace_span!(M, "InterpCx::fn_abi_of_fn_ptr", ?sig, ?extra_args); + let _trace = enter_trace_span!(M, layouting::fn_abi_of_fn_ptr, ?sig, ?extra_args); FnAbiOf::fn_abi_of_fn_ptr(self, sig, extra_args) } @@ -139,7 +139,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>>, ) -> <Self as FnAbiOfHelpers<'tcx>>::FnAbiOfResult { - let _span = enter_trace_span!(M, "InterpCx::fn_abi_of_instance", ?instance, ?extra_args); + let _trace = enter_trace_span!(M, layouting::fn_abi_of_instance, ?instance, ?extra_args); FnAbiOf::fn_abi_of_instance(self, instance, extra_args) } } @@ -322,7 +322,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { frame: &Frame<'tcx, M::Provenance, M::FrameExtra>, value: T, ) -> Result<T, ErrorHandled> { - let _span = enter_trace_span!( + let _trace = enter_trace_span!( M, "instantiate_from_frame_and_normalize_erasing_regions", "{}", @@ -344,6 +344,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { def: DefId, args: GenericArgsRef<'tcx>, ) -> InterpResult<'tcx, ty::Instance<'tcx>> { + let _trace = enter_trace_span!(M, resolve::try_resolve, def = ?def); trace!("resolve: {:?}, {:#?}", def, args); trace!("typing_env: {:#?}", self.typing_env); trace!("args: {:#?}", args); @@ -442,10 +443,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // # First compute the dynamic alignment // Packed type alignment needs to be capped. 
- if let ty::Adt(def, _) = layout.ty.kind() { - if let Some(packed) = def.repr().pack { - unsized_align = unsized_align.min(packed); - } + if let ty::Adt(def, _) = layout.ty.kind() + && let Some(packed) = def.repr().pack + { + unsized_align = unsized_align.min(packed); } // Choose max of two known alignments (combined value must @@ -582,8 +583,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { span: Span, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { - M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| { - let const_val = val.eval(*ecx.tcx, ecx.typing_env, span).map_err(|err| { + let const_val = val.eval(*self.tcx, self.typing_env, span).map_err(|err| { if M::ALL_CONSTS_ARE_PRECHECKED { match err { ErrorHandled::TooGeneric(..) => {}, @@ -599,11 +599,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } } } - err.emit_note(*ecx.tcx); + err.emit_note(*self.tcx); err })?; - ecx.const_val_to_op(const_val, val.ty(), layout) - }) + self.const_val_to_op(const_val, val.ty(), layout) } #[must_use] diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index bb59b9f5418..5d510a98352 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -19,9 +19,12 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexMap}; use rustc_hir as hir; use rustc_hir::definitions::{DefPathData, DisambiguatorState}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; -use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult}; +use rustc_middle::mir::interpret::{ + AllocBytes, ConstAllocation, CtfeProvenance, InterpResult, Provenance, +}; use rustc_middle::query::TyCtxtAt; use rustc_middle::span_bug; +use rustc_middle::ty::TyCtxt; use rustc_middle::ty::layout::TyAndLayout; use rustc_span::def_id::LocalDefId; use tracing::{instrument, trace}; @@ -52,39 +55,30 @@ impl HasStaticRootDefId for const_eval::CompileTimeMachine<'_> { } } -/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory. -/// -/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the -/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be* -/// already mutable (as a sanity check). -/// -/// Returns an iterator over all relocations referred to by this allocation. -fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>( - ecx: &mut InterpCx<'tcx, M>, - alloc_id: AllocId, +fn prepare_alloc<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( + tcx: TyCtxt<'tcx>, + kind: MemoryKind<const_eval::MemoryKind>, + alloc: &mut Allocation<Prov, Extra, Bytes>, mutability: Mutability, - disambiguator: Option<&mut DisambiguatorState>, -) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> { - trace!("intern_shallow {:?}", alloc_id); - // remove allocation - // FIXME(#120456) - is `swap_remove` correct? - let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else { - return Err(InternError::DanglingPointer); - }; - +) -> Result<(), InternError> { match kind { MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => { if !was_made_global { // Attempting to intern a `const_allocate`d pointer that was not made global via - // `const_make_global`. We want to error here, but we have to first put the - // allocation back into the `alloc_map` to keep things in a consistent state. 
- ecx.memory.alloc_map.insert(alloc_id, (kind, alloc)); + // `const_make_global`. + tcx.dcx().delayed_bug("non-global heap allocation in const value"); return Err(InternError::ConstAllocNotGlobal); } } MemoryKind::Stack | MemoryKind::CallerLocation => {} } + if !alloc.provenance_merge_bytes(&tcx) { + // Per-byte provenance is not supported by backends, so we cannot accept it here. + tcx.dcx().delayed_bug("partial pointer in const value"); + return Err(InternError::PartialPointer); + } + // Set allocation mutability as appropriate. This is used by LLVM to put things into // read-only memory, and also by Miri when evaluating other globals that // access this one. @@ -97,6 +91,36 @@ fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>( assert_eq!(alloc.mutability, Mutability::Mut); } } + Ok(()) +} + +/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory. +/// +/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the +/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be* +/// already mutable (as a sanity check). +/// +/// Returns an iterator over all relocations referred to by this allocation. +fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>( + ecx: &mut InterpCx<'tcx, M>, + alloc_id: AllocId, + mutability: Mutability, + disambiguator: Option<&mut DisambiguatorState>, +) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> { + trace!("intern_shallow {:?}", alloc_id); + // remove allocation + // FIXME(#120456) - is `swap_remove` correct? + let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else { + return Err(InternError::DanglingPointer); + }; + + if let Err(err) = prepare_alloc(*ecx.tcx, kind, &mut alloc, mutability) { + // We want to error here, but we have to first put the + // allocation back into the `alloc_map` to keep things in a consistent state. + ecx.memory.alloc_map.insert(alloc_id, (kind, alloc)); + return Err(err); + } + // link the alloc id to the actual allocation let alloc = ecx.tcx.mk_const_alloc(alloc); if let Some(static_id) = ecx.machine.static_def_id() { @@ -166,6 +190,7 @@ pub enum InternError { BadMutablePointer, DanglingPointer, ConstAllocNotGlobal, + PartialPointer, } /// Intern `ret` and everything it references. @@ -221,13 +246,11 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>( let mut todo: Vec<_> = if is_static { // Do not steal the root allocation, we need it later to create the return value of `eval_static_initializer`. // But still change its mutability to match the requested one. - let alloc = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap(); - alloc.1.mutability = base_mutability; - alloc.1.provenance().ptrs().iter().map(|&(_, prov)| prov).collect() + let (kind, alloc) = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap(); + prepare_alloc(*ecx.tcx, *kind, alloc, base_mutability)?; + alloc.provenance().ptrs().iter().map(|&(_, prov)| prov).collect() } else { - intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator)) - .unwrap() - .collect() + intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))?.collect() }; // We need to distinguish "has just been interned" from "was already in `tcx`", // so we track this in a separate set. @@ -235,7 +258,6 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>( // Whether we encountered a bad mutable pointer. 
// We want to first report "dangling" and then "mutable", so we need to delay reporting these // errors. - let mut result = Ok(()); let mut found_bad_mutable_ptr = false; // Keep interning as long as there are things to intern. @@ -310,20 +332,15 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>( // okay with losing some potential for immutability here. This can anyway only affect // `static mut`. just_interned.insert(alloc_id); - match intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator)) { - Ok(nested) => todo.extend(nested), - Err(err) => { - ecx.tcx.dcx().delayed_bug("error during const interning"); - result = Err(err); - } - } + let next = intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator))?; + todo.extend(next); } - if found_bad_mutable_ptr && result.is_ok() { + if found_bad_mutable_ptr { // We found a mutable pointer inside a const where inner allocations should be immutable, // and there was no other error. This should usually never happen! However, this can happen // in unleash-miri mode, so report it as a normal error then. if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you { - result = Err(InternError::BadMutablePointer); + return Err(InternError::BadMutablePointer); } else { span_bug!( ecx.tcx.span, @@ -331,7 +348,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>( ); } } - result + Ok(()) } /// Intern `ret`. This function assumes that `ret` references no other allocation. diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index e24a355891d..5e3d0a15d8b 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -6,7 +6,7 @@ use std::assert_matches::assert_matches; use rustc_abi::{FieldIdx, HasDataLayout, Size}; use rustc_apfloat::ieee::{Double, Half, Quad, Single}; -use rustc_middle::mir::interpret::{read_target_uint, write_target_uint}; +use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, read_target_uint, write_target_uint}; use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic}; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{Ty, TyCtxt}; @@ -17,17 +17,18 @@ use tracing::trace; use super::memory::MemoryKind; use super::util::ensure_monomorphic_enough; use super::{ - Allocation, CheckInAllocMsg, ConstAllocation, ImmTy, InterpCx, InterpResult, Machine, OpTy, - PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format, - interp_ok, throw_inval, throw_ub_custom, throw_ub_format, + AllocId, CheckInAllocMsg, ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer, + PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format, interp_ok, throw_inval, + throw_ub_custom, throw_ub_format, }; use crate::fluent_generated as fluent; /// Directly returns an `Allocation` containing an absolute path representation of the given type. 
-pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> { +pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (AllocId, u64) { let path = crate::util::type_name(tcx, ty); - let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ()); - tcx.mk_const_alloc(alloc) + let bytes = path.into_bytes(); + let len = bytes.len().try_into().unwrap(); + (tcx.allocate_bytes_dedup(bytes, CTFE_ALLOC_SALT), len) } impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Generates a value of `TypeId` for `ty` in-place. @@ -126,8 +127,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { sym::type_name => { let tp_ty = instance.args.type_at(0); ensure_monomorphic_enough(tcx, tp_ty)?; - let alloc = alloc_type_name(tcx, tp_ty); - let val = ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }; + let (alloc_id, meta) = alloc_type_name(tcx, tp_ty); + let val = ConstValue::Slice { alloc_id, meta }; let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?; self.copy_op(&val, dest)?; } diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs index e981f3973ae..e22629993fb 100644 --- a/compiler/rustc_const_eval/src/interpret/machine.rs +++ b/compiler/rustc_const_eval/src/interpret/machine.rs @@ -12,7 +12,6 @@ use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::Ty; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::{mir, ty}; -use rustc_span::Span; use rustc_span::def_id::DefId; use rustc_target::callconv::FnAbi; @@ -587,27 +586,6 @@ pub trait Machine<'tcx>: Sized { interp_ok(()) } - /// Evaluate the given constant. The `eval` function will do all the required evaluation, - /// but this hook has the chance to do some pre/postprocessing. - #[inline(always)] - fn eval_mir_constant<F>( - ecx: &InterpCx<'tcx, Self>, - val: mir::Const<'tcx>, - span: Span, - layout: Option<TyAndLayout<'tcx>>, - eval: F, - ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>> - where - F: Fn( - &InterpCx<'tcx, Self>, - mir::Const<'tcx>, - Span, - Option<TyAndLayout<'tcx>>, - ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>, - { - eval(ecx, val, span, layout) - } - /// Returns the salt to be used for a deduplicated global alloation. /// If the allocation is for a function, the instance is provided as well /// (this lets Miri ensure unique addresses for some functions). diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 34297a61648..2c1e5087e1c 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -937,8 +937,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // (both global from `alloc_map` and local from `extra_fn_ptr_map`) if let Some(fn_val) = self.get_fn_alloc(id) { let align = match fn_val { - FnVal::Instance(instance) => { - self.tcx.codegen_instance_attrs(instance.def).alignment.unwrap_or(Align::ONE) + FnVal::Instance(_instance) => { + // FIXME: Until we have a clear design for the effects of align(N) functions + // on the address of function pointers, we don't consider the align(N) + // attribute on functions in the interpreter. + // See <https://github.com/rust-lang/rust/issues/144661> for more context. + Align::ONE } // Machine-specific extra functions currently do not support alignment restrictions. 
FnVal::Other(_) => Align::ONE, @@ -1000,7 +1004,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { ptr: Pointer<Option<M::Provenance>>, ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> { let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?; - let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else { + let Some(GlobalAlloc::TypeId { ty }) = self.tcx.try_get_global_alloc(alloc_id) else { throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata") }; interp_ok((ty, offset.bytes())) @@ -1310,29 +1314,20 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> } /// Mark the given sub-range (relative to this allocation reference) as uninitialized. - pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> { + pub fn write_uninit(&mut self, range: AllocRange) { let range = self.range.subrange(range); - self.alloc - .write_uninit(&self.tcx, range) - .map_err(|e| e.to_interp_error(self.alloc_id)) - .into() + self.alloc.write_uninit(&self.tcx, range); } /// Mark the entire referenced range as uninitialized - pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> { - self.alloc - .write_uninit(&self.tcx, self.range) - .map_err(|e| e.to_interp_error(self.alloc_id)) - .into() + pub fn write_uninit_full(&mut self) { + self.alloc.write_uninit(&self.tcx, self.range); } /// Remove all provenance in the reference range. - pub fn clear_provenance(&mut self) -> InterpResult<'tcx> { - self.alloc - .clear_provenance(&self.tcx, self.range) - .map_err(|e| e.to_interp_error(self.alloc_id)) - .into() + pub fn clear_provenance(&mut self) { + self.alloc.clear_provenance(&self.tcx, self.range); } } @@ -1423,11 +1418,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Side-step AllocRef and directly access the underlying bytes more efficiently. // (We are staying inside the bounds here and all bytes do get overwritten so all is good.) - let alloc_id = alloc_ref.alloc_id; - let bytes = alloc_ref - .alloc - .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range) - .map_err(move |e| e.to_interp_error(alloc_id))?; + let bytes = + alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range); // `zip` would stop when the first iterator ends; we want to definitely // cover all of `bytes`. for dest in bytes { @@ -1509,10 +1501,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // `get_bytes_mut` will clear the provenance, which is correct, // since we don't want to keep any provenance at the target. // This will also error if copying partial provenance is not supported. - let provenance = src_alloc - .provenance() - .prepare_copy(src_range, dest_offset, num_copies, self) - .map_err(|e| e.to_interp_error(src_alloc_id))?; + let provenance = + src_alloc.provenance().prepare_copy(src_range, dest_offset, num_copies, self); // Prepare a copy of the initialization mask. let init = src_alloc.init_mask().prepare_copy(src_range); @@ -1530,10 +1520,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { dest_range, )?; // Yes we do overwrite all bytes in `dest_bytes`. - let dest_bytes = dest_alloc - .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range) - .map_err(|e| e.to_interp_error(dest_alloc_id))? - .as_mut_ptr(); + let dest_bytes = + dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr(); if init.no_bytes_init() { // Fast path: If all bytes are `uninit` then there is nothing to copy. 
The target range @@ -1542,9 +1530,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // This also avoids writing to the target bytes so that the backing allocation is never // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary // operating system this can avoid physically allocating the page. - dest_alloc - .write_uninit(&tcx, dest_range) - .map_err(|e| e.to_interp_error(dest_alloc_id))?; + dest_alloc.write_uninit(&tcx, dest_range); // `write_uninit` also resets the provenance, so we are done. return interp_ok(()); } diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index 62cbbae24a8..560b0e1ae4e 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -13,6 +13,7 @@ use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter}; use rustc_middle::ty::{ConstInt, ScalarInt, Ty, TyCtxt}; use rustc_middle::{bug, mir, span_bug, ty}; use rustc_span::DUMMY_SP; +use tracing::field::Empty; use tracing::trace; use super::{ @@ -20,6 +21,7 @@ use super::{ OffsetMode, PlaceTy, Pointer, Projectable, Provenance, Scalar, alloc_range, err_ub, from_known_layout, interp_ok, mir_assign_valid_types, throw_ub, }; +use crate::enter_trace_span; /// An `Immediate` represents a single immediate self-contained Rust value. /// @@ -173,6 +175,16 @@ impl<Prov: Provenance> Immediate<Prov> { } interp_ok(()) } + + pub fn has_provenance(&self) -> bool { + match self { + Immediate::Scalar(scalar) => matches!(scalar, Scalar::Ptr { .. }), + Immediate::ScalarPair(s1, s2) => { + matches!(s1, Scalar::Ptr { .. }) || matches!(s2, Scalar::Ptr { .. }) + } + Immediate::Uninit => false, + } + } } // ScalarPair needs a type to interpret, so we often have an immediate and a type together @@ -186,18 +198,18 @@ pub struct ImmTy<'tcx, Prov: Provenance = CtfeProvenance> { impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { /// Helper function for printing a scalar to a FmtPrinter - fn p<'a, 'tcx, Prov: Provenance>( - cx: &mut FmtPrinter<'a, 'tcx>, + fn print_scalar<'a, 'tcx, Prov: Provenance>( + p: &mut FmtPrinter<'a, 'tcx>, s: Scalar<Prov>, ty: Ty<'tcx>, ) -> Result<(), std::fmt::Error> { match s { - Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true), + Scalar::Int(int) => p.pretty_print_const_scalar_int(int, ty, true), Scalar::Ptr(ptr, _sz) => { // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to // print what is points to, which would fail since it has no access to the local // memory. 
- cx.pretty_print_const_pointer(ptr, ty) + p.pretty_print_const_pointer(ptr, ty) } } } @@ -205,8 +217,9 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> { match self.imm { Immediate::Scalar(s) => { if let Some(ty) = tcx.lift(self.layout.ty) { - let s = - FmtPrinter::print_string(tcx, Namespace::ValueNS, |cx| p(cx, s, ty))?; + let s = FmtPrinter::print_string(tcx, Namespace::ValueNS, |p| { + print_scalar(p, s, ty) + })?; f.write_str(&s)?; return Ok(()); } @@ -770,6 +783,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { mir_place: mir::Place<'tcx>, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { + let _trace = enter_trace_span!( + M, + step::eval_place_to_op, + ?mir_place, + tracing_separate_thread = Empty + ); + // Do not use the layout passed in as argument if the base we are looking at // here is not the entire place. let layout = if mir_place.projection.is_empty() { layout } else { None }; @@ -813,6 +833,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { mir_op: &mir::Operand<'tcx>, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { + let _trace = + enter_trace_span!(M, step::eval_operand, ?mir_op, tracing_separate_thread = Empty); + use rustc_middle::mir::Operand::*; let op = match mir_op { // FIXME: do some more logic on `move` to invalidate the old location @@ -836,7 +859,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { pub(crate) fn const_val_to_op( &self, - val_val: mir::ConstValue<'tcx>, + val_val: mir::ConstValue, ty: Ty<'tcx>, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { @@ -860,9 +883,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(), mir::ConstValue::ZeroSized => Immediate::Uninit, - mir::ConstValue::Slice { data, meta } => { + mir::ConstValue::Slice { alloc_id, meta } => { // This is const data, no mutation allowed. - let alloc_id = self.tcx.reserve_and_set_memory_alloc(data); let ptr = Pointer::new(CtfeProvenance::from(alloc_id).as_immutable(), Size::ZERO); Immediate::new_slice(self.global_root_pointer(ptr)?.into(), meta, self) } diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index e2284729efd..e7fe4c0b34f 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -9,6 +9,7 @@ use rustc_abi::{BackendRepr, HasDataLayout, Size}; use rustc_middle::ty::Ty; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::{bug, mir, span_bug}; +use tracing::field::Empty; use tracing::{instrument, trace}; use super::{ @@ -16,6 +17,7 @@ use super::{ InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer, Projectable, Provenance, Scalar, alloc_range, interp_ok, mir_assign_valid_types, }; +use crate::enter_trace_span; #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] /// Information required for the sound usage of a `MemPlace`. @@ -524,6 +526,9 @@ where &self, mir_place: mir::Place<'tcx>, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> { + let _trace = + enter_trace_span!(M, step::eval_place, ?mir_place, tracing_separate_thread = Empty); + let mut place = self.local_to_place(mir_place.local)?; // Using `try_fold` turned out to be bad for performance, hence the loop. 
for elem in mir_place.projection.iter() { @@ -700,7 +705,7 @@ where match value { Immediate::Scalar(scalar) => { - alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar) + alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)?; } Immediate::ScalarPair(a_val, b_val) => { let BackendRepr::ScalarPair(a, b) = layout.backend_repr else { @@ -720,10 +725,10 @@ where alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?; alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?; // We don't have to reset padding here, `write_immediate` will anyway do a validation run. - interp_ok(()) } Immediate::Uninit => alloc.write_uninit_full(), } + interp_ok(()) } pub fn write_uninit( @@ -743,7 +748,7 @@ where // Zero-sized access return interp_ok(()); }; - alloc.write_uninit_full()?; + alloc.write_uninit_full(); } } interp_ok(()) @@ -754,6 +759,13 @@ where &mut self, dest: &impl Writeable<'tcx, M::Provenance>, ) -> InterpResult<'tcx> { + // If this is an efficiently represented local variable without provenance, skip the + // `as_mplace_or_mutable_local` that would otherwise force this local into memory. + if let Right(imm) = dest.to_op(self)?.as_mplace_or_imm() { + if !imm.has_provenance() { + return interp_ok(()); + } + } match self.as_mplace_or_mutable_local(&dest.to_place())? { Right((local_val, _local_layout, local)) => { local_val.clear_provenance()?; @@ -767,7 +779,7 @@ where // Zero-sized access return interp_ok(()); }; - alloc.clear_provenance()?; + alloc.clear_provenance(); } } interp_ok(()) diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index f72c4418081..d05871bfc77 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -199,6 +199,15 @@ where base.offset_with_meta(offset, OffsetMode::Inbounds, meta, field_layout, self) } + /// Projects multiple fields at once. See [`Self::project_field`] for details. + pub fn project_fields<P: Projectable<'tcx, M::Provenance>, const N: usize>( + &self, + base: &P, + fields: [FieldIdx; N], + ) -> InterpResult<'tcx, [P; N]> { + fields.try_map(|field| self.project_field(base, field)) + } + /// Downcasting to an enum variant. pub fn project_downcast<P: Projectable<'tcx, M::Provenance>>( &self, diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs index 2e99bb4209f..73cc87508ef 100644 --- a/compiler/rustc_const_eval/src/interpret/stack.rs +++ b/compiler/rustc_const_eval/src/interpret/stack.rs @@ -397,11 +397,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Finish things up. M::after_stack_push(self)?; self.frame_mut().loc = Left(mir::Location::START); - // `tracing_separate_thread` is used to instruct the chrome_tracing [tracing::Layer] in Miri + // `tracing_separate_thread` is used to instruct the tracing_chrome [tracing::Layer] in Miri // to put the "frame" span on a separate trace thread/line than other spans, to make the - // visualization in https://ui.perfetto.dev easier to interpret. It is set to a value of + // visualization in <https://ui.perfetto.dev> easier to interpret. It is set to a value of // [tracing::field::Empty] so that other tracing layers (e.g. the logger) will ignore it. 
- let span = info_span!("frame", tracing_separate_thread = Empty, "{}", instance); + let span = info_span!("frame", tracing_separate_thread = Empty, frame = %instance); self.frame_mut().tracing_span.enter(span); interp_ok(()) diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index 629dcc3523c..f1995b3f132 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -9,13 +9,15 @@ use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::{bug, mir, span_bug}; use rustc_span::source_map::Spanned; use rustc_target::callconv::FnAbi; +use tracing::field::Empty; use tracing::{info, instrument, trace}; use super::{ FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy, Projectable, Scalar, interp_ok, throw_ub, throw_unsup_format, }; -use crate::util; +use crate::interpret::EnteredTraceSpan; +use crate::{enter_trace_span, util}; struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> { callee: FnVal<'tcx, M::ExtraFnVal>, @@ -74,7 +76,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// /// This does NOT move the statement counter forward, the caller has to do that! pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> { - info!("{:?}", stmt); + let _trace = enter_trace_span!( + M, + step::eval_statement, + stmt = ?stmt.kind, + span = ?stmt.source_info.span, + tracing_separate_thread = Empty, + ) + .or_if_tracing_disabled(|| info!(stmt = ?stmt.kind)); use rustc_middle::mir::StatementKind::*; @@ -456,7 +465,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> { - info!("{:?}", terminator.kind); + let _trace = enter_trace_span!( + M, + step::eval_terminator, + terminator = ?terminator.kind, + span = ?terminator.source_info.span, + tracing_separate_thread = Empty, + ) + .or_if_tracing_disabled(|| info!(terminator = ?terminator.kind)); use rustc_middle::mir::TerminatorKind::*; match terminator.kind { @@ -544,7 +560,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { "Async Drop must be expanded or reset to sync in runtime MIR" ); let place = self.eval_place(place)?; - let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty); + let instance = { + let _trace = + enter_trace_span!(M, resolve::resolve_drop_in_place, ty = ?place.layout.ty); + Instance::resolve_drop_in_place(*self.tcx, place.layout.ty) + }; if let ty::InstanceKind::DropGlue(_, None) = instance.def { // This is the branch we enter if and only if the dropped type has no drop glue // whatsoever. This can happen as a result of monomorphizing a drop of a diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs index 72650d545c3..72bee345406 100644 --- a/compiler/rustc_const_eval/src/interpret/util.rs +++ b/compiler/rustc_const_eval/src/interpret/util.rs @@ -48,18 +48,104 @@ pub(crate) fn create_static_alloc<'tcx>( /// A marker trait returned by [crate::interpret::Machine::enter_trace_span], identifying either a /// real [tracing::span::EnteredSpan] in case tracing is enabled, or the dummy type `()` when -/// tracing is disabled. -pub trait EnteredTraceSpan {} -impl EnteredTraceSpan for () {} -impl EnteredTraceSpan for tracing::span::EnteredSpan {} +/// tracing is disabled. Also see [crate::enter_trace_span!] below. 
+pub trait EnteredTraceSpan { + /// Allows executing an alternative function when tracing is disabled. Useful for example if you + /// want to open a trace span when tracing is enabled, and alternatively just log a line when + /// tracing is disabled. + fn or_if_tracing_disabled(self, f: impl FnOnce()) -> Self; +} +impl EnteredTraceSpan for () { + fn or_if_tracing_disabled(self, f: impl FnOnce()) -> Self { + f(); // tracing is disabled, execute the function + self + } +} +impl EnteredTraceSpan for tracing::span::EnteredSpan { + fn or_if_tracing_disabled(self, _f: impl FnOnce()) -> Self { + self // tracing is enabled, don't execute anything + } +} -/// Shortand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span]. +/// Shortand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span!]. /// This is supposed to be compiled out when [crate::interpret::Machine::enter_trace_span] has the /// default implementation (i.e. when it does not actually enter the span but instead returns `()`). +/// This macro takes a type implementing the [crate::interpret::Machine] trait as its first argument +/// and otherwise accepts the same syntax as [tracing::span!] (see some tips below). /// Note: the result of this macro **must be used** because the span is exited when it's dropped. +/// +/// ### Syntax accepted by this macro +/// +/// The full documentation for the [tracing::span!] syntax can be found at [tracing] under "Using the +/// Macros". A few possibly confusing syntaxes are listed here: +/// ```rust +/// # use rustc_const_eval::enter_trace_span; +/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>; +/// # let my_display_var = String::new(); +/// # let my_debug_var = String::new(); +/// // logs a span named "hello" with a field named "arg" of value 42 (works only because +/// // 42 implements the tracing::Value trait, otherwise use one of the options below) +/// let _trace = enter_trace_span!(M, "hello", arg = 42); +/// // logs a field called "my_display_var" using the Display implementation +/// let _trace = enter_trace_span!(M, "hello", %my_display_var); +/// // logs a field called "my_debug_var" using the Debug implementation +/// let _trace = enter_trace_span!(M, "hello", ?my_debug_var); +/// ``` +/// +/// ### `NAME::SUBNAME` syntax +/// +/// In addition to the syntax accepted by [tracing::span!], this macro optionally allows passing +/// the span name (i.e. the first macro argument) in the form `NAME::SUBNAME` (without quotes) to +/// indicate that the span has name "NAME" (usually the name of the component) and has an additional +/// more specific name "SUBNAME" (usually the function name). The latter is passed to the [tracing] +/// infrastructure as a span field with the name "NAME". This allows not being distracted by +/// subnames when looking at the trace in <https://ui.perfetto.dev>, but when deeper introspection +/// is needed within a component, it's still possible to view the subnames directly in the UI by +/// selecting a span, clicking on the "NAME" argument on the right, and clicking on "Visualize +/// argument values". +/// ```rust +/// # use rustc_const_eval::enter_trace_span; +/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>; +/// // for example, the first will expand to the second +/// let _trace = enter_trace_span!(M, borrow_tracker::on_stack_pop, /* ... */); +/// let _trace = enter_trace_span!(M, "borrow_tracker", borrow_tracker = "on_stack_pop", /* ... 
*/); +/// ``` +/// +/// ### `tracing_separate_thread` parameter +/// +/// This macro was introduced to obtain better traces of Miri without impacting release performance. +/// Miri saves traces using the the `tracing_chrome` `tracing::Layer` so that they can be visualized +/// in <https://ui.perfetto.dev>. To instruct `tracing_chrome` to put some spans on a separate trace +/// thread/line than other spans when viewed in <https://ui.perfetto.dev>, you can pass +/// `tracing_separate_thread = tracing::field::Empty` to the tracing macros. This is useful to +/// separate out spans which just indicate the current step or program frame being processed by the +/// interpreter. You should use a value of [tracing::field::Empty] so that other tracing layers +/// (e.g. the logger) will ignore the `tracing_separate_thread` field. For example: +/// ```rust +/// # use rustc_const_eval::enter_trace_span; +/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>; +/// let _trace = enter_trace_span!(M, step::eval_statement, tracing_separate_thread = tracing::field::Empty); +/// ``` +/// +/// ### Executing something else when tracing is disabled +/// +/// [crate::interpret::Machine::enter_trace_span] returns [EnteredTraceSpan], on which you can call +/// [EnteredTraceSpan::or_if_tracing_disabled], to e.g. log a line as an alternative to the tracing +/// span for when tracing is disabled. For example: +/// ```rust +/// # use rustc_const_eval::enter_trace_span; +/// # use rustc_const_eval::interpret::EnteredTraceSpan; +/// # type M = rustc_const_eval::const_eval::CompileTimeMachine<'static>; +/// let _trace = enter_trace_span!(M, step::eval_statement) +/// .or_if_tracing_disabled(|| tracing::info!("eval_statement")); +/// ``` #[macro_export] macro_rules! enter_trace_span { - ($machine:ident, $($tt:tt)*) => { - $machine::enter_trace_span(|| tracing::info_span!($($tt)*)) - } + ($machine:ty, $name:ident :: $subname:ident $($tt:tt)*) => { + $crate::enter_trace_span!($machine, stringify!($name), $name = %stringify!($subname) $($tt)*) + }; + + ($machine:ty, $($tt:tt)*) => { + <$machine as $crate::interpret::Machine>::enter_trace_span(|| tracing::info_span!($($tt)*)) + }; } diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 693b3782960..5e8bee65706 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -320,10 +320,10 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { // for a coroutine). let var_hir_id = captured_place.get_root_variable(); let node = self.ecx.tcx.hir_node(var_hir_id); - if let hir::Node::Pat(pat) = node { - if let hir::PatKind::Binding(_, _, ident, _) = pat.kind { - name = Some(ident.name); - } + if let hir::Node::Pat(pat) = node + && let hir::PatKind::Binding(_, _, ident, _) = pat.kind + { + name = Some(ident.name); } } } @@ -949,7 +949,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { let padding_size = offset - padding_cleared_until; let range = alloc_range(padding_start, padding_size); trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty); - alloc.write_uninit(range)?; + alloc.write_uninit(range); } padding_cleared_until = offset + size; } @@ -1239,7 +1239,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, if self.reset_provenance_and_padding { // We can't share this with above as above, we might be looking at read-only memory. 
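The `NAME::SUBNAME` arm of `enter_trace_span!` defined above works purely by token matching plus `stringify!`. The toy macro below (illustrative only, not the compiler's macro) reproduces that splitting so the resulting span name and field can be observed with a plain `println!`:

```rust
macro_rules! toy_span {
    // First arm: split `NAME::SUBNAME`, mirroring the new `enter_trace_span!` arm.
    ($name:ident :: $subname:ident $($rest:tt)*) => {
        toy_span!(stringify!($name), $name = stringify!($subname) $($rest)*)
    };
    // Second arm: the "plain" form; here we just print instead of entering a span.
    ($name:expr $(, $field:ident = $value:expr)* $(,)?) => {
        println!("span {}: {:?}", $name, [$((stringify!($field), $value)),*])
    };
}

fn main() {
    // Goes through the first arm and becomes:
    //   toy_span!("step", step = "eval_statement", extra = "42")
    // printing: span step: [("step", "eval_statement"), ("extra", "42")]
    toy_span!(step::eval_statement, extra = "42");
}
```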
let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0"); - alloc.clear_provenance()?; + alloc.clear_provenance(); // Also, mark this as containing data, not padding. self.add_data_range(mplace.ptr(), size); } @@ -1415,7 +1415,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { recursive: bool, reset_provenance_and_padding: bool, ) -> InterpResult<'tcx> { - let _span = enter_trace_span!( + let _trace = enter_trace_span!( M, "validate_operand", "recursive={recursive}, reset_provenance_and_padding={reset_provenance_and_padding}, val={val:?}" diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs index a27b6646131..82c50fac6c0 100644 --- a/compiler/rustc_const_eval/src/interpret/visitor.rs +++ b/compiler/rustc_const_eval/src/interpret/visitor.rs @@ -121,25 +121,24 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized { // `Box` has two fields: the pointer we care about, and the allocator. assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields"); - let (unique_ptr, alloc) = ( - self.ecx().project_field(v, FieldIdx::ZERO)?, - self.ecx().project_field(v, FieldIdx::ONE)?, - ); + let [unique_ptr, alloc] = + self.ecx().project_fields(v, [FieldIdx::ZERO, FieldIdx::ONE])?; + // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`... // (which means another 2 fields, the second of which is a `PhantomData`) assert_eq!(unique_ptr.layout().fields.count(), 2); - let (nonnull_ptr, phantom) = ( - self.ecx().project_field(&unique_ptr, FieldIdx::ZERO)?, - self.ecx().project_field(&unique_ptr, FieldIdx::ONE)?, - ); + let [nonnull_ptr, phantom] = + self.ecx().project_fields(&unique_ptr, [FieldIdx::ZERO, FieldIdx::ONE])?; assert!( phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()), "2nd field of `Unique` should be PhantomData but is {:?}", phantom.layout().ty, ); + // ... that contains a `NonNull`... (gladly, only a single field here) assert_eq!(nonnull_ptr.layout().fields.count(), 1); let raw_ptr = self.ecx().project_field(&nonnull_ptr, FieldIdx::ZERO)?; // the actual raw ptr + // ... whose only field finally is a raw ptr we can dereference. self.visit_box(ty, &raw_ptr)?; diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index bf7a79dcb20..8ace560d85d 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -1,7 +1,9 @@ // tidy-alphabetical-start #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] +#![cfg_attr(bootstrap, feature(strict_overflow_ops))] #![doc(rust_logo)] +#![feature(array_try_map)] #![feature(assert_matches)] #![feature(box_patterns)] #![feature(decl_macro)] @@ -9,7 +11,6 @@ #![feature(never_type)] #![feature(rustdoc_internals)] #![feature(slice_ptr_get)] -#![feature(strict_overflow_ops)] #![feature(trait_alias)] #![feature(try_blocks)] #![feature(unqualified_local_imports)] diff --git a/compiler/rustc_const_eval/src/util/caller_location.rs b/compiler/rustc_const_eval/src/util/caller_location.rs index f489b05fbbd..5249b32eca4 100644 --- a/compiler/rustc_const_eval/src/util/caller_location.rs +++ b/compiler/rustc_const_eval/src/util/caller_location.rs @@ -42,12 +42,12 @@ fn alloc_caller_location<'tcx>( let location = ecx.allocate(loc_layout, MemoryKind::CallerLocation).unwrap(); // Initialize fields. 
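The `project_fields` calls above destructure several fields at once into an array; together with the new `array_try_map` feature gate added to lib.rs, this presumably rests on `<[T; N]>::try_map`. A hedged, stand-alone sketch of that pattern (nightly only; `Field` and `project_one` are made-up stand-ins, not interpreter types):

```rust
#![feature(array_try_map)]

#[derive(Debug)]
struct Field(u32);

// Fallible projection of a single field index, standing in for `project_field`.
fn project_one(idx: u32) -> Result<Field, String> {
    if idx < 3 { Ok(Field(idx)) } else { Err(format!("no field {idx}")) }
}

fn main() -> Result<(), String> {
    // Mirrors `let [unique_ptr, alloc] = ecx.project_fields(v, [FieldIdx::ZERO, FieldIdx::ONE])?;`:
    // all requested fields are projected in one go and destructured as an array.
    let [a, b] = [0u32, 1u32].try_map(project_one)?;
    println!("{a:?} {b:?}");
    Ok(())
}
```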
- ecx.write_immediate(filename, &ecx.project_field(&location, FieldIdx::from_u32(0)).unwrap()) - .expect("writing to memory we just allocated cannot fail"); - ecx.write_scalar(line, &ecx.project_field(&location, FieldIdx::from_u32(1)).unwrap()) - .expect("writing to memory we just allocated cannot fail"); - ecx.write_scalar(col, &ecx.project_field(&location, FieldIdx::from_u32(2)).unwrap()) + let [filename_field, line_field, col_field] = + ecx.project_fields(&location, [0, 1, 2].map(FieldIdx::from_u32)).unwrap(); + ecx.write_immediate(filename, &filename_field) .expect("writing to memory we just allocated cannot fail"); + ecx.write_scalar(line, &line_field).expect("writing to memory we just allocated cannot fail"); + ecx.write_scalar(col, &col_field).expect("writing to memory we just allocated cannot fail"); location } @@ -57,7 +57,7 @@ pub(crate) fn const_caller_location_provider( file: Symbol, line: u32, col: u32, -) -> mir::ConstValue<'_> { +) -> mir::ConstValue { trace!("const_caller_location: {}:{}:{}", file, line, col); let mut ecx = mk_eval_cx_to_read_const_val( tcx, diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs index e8f2728a772..13cc607135a 100644 --- a/compiler/rustc_const_eval/src/util/type_name.rs +++ b/compiler/rustc_const_eval/src/util/type_name.rs @@ -4,21 +4,24 @@ use rustc_data_structures::intern::Interned; use rustc_hir::def_id::CrateNum; use rustc_hir::definitions::DisambiguatedDefPathData; use rustc_middle::bug; -use rustc_middle::ty::print::{PrettyPrinter, Print, PrintError, Printer}; -use rustc_middle::ty::{self, GenericArg, GenericArgKind, Ty, TyCtxt}; +use rustc_middle::ty::print::{PrettyPrinter, PrintError, Printer}; +use rustc_middle::ty::{self, GenericArg, Ty, TyCtxt}; -struct AbsolutePathPrinter<'tcx> { +struct TypeNamePrinter<'tcx> { tcx: TyCtxt<'tcx>, path: String, } -impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { +impl<'tcx> Printer<'tcx> for TypeNamePrinter<'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn print_region(&mut self, _region: ty::Region<'_>) -> Result<(), PrintError> { - Ok(()) + // FIXME: most regions have been erased by the time this code runs. + // Just printing `'_` is a bit hacky but gives mostly good results, and + // doing better is difficult. See `should_print_optional_region`. 
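The renamed printer keeps building its output through `std::fmt::Write`, which is what lets `print_region` use `write!(self, "'_")` below. A minimal sketch of that buffer-building pattern, with `PathBuilder` as an illustrative stand-in rather than the compiler's type:

```rust
use std::fmt::{self, Write};

// A printer that accumulates everything written to it into a String,
// the same trick `impl Write for TypeNamePrinter` uses further down.
struct PathBuilder {
    path: String,
}

impl Write for PathBuilder {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.path.push_str(s);
        Ok(())
    }
}

fn main() -> fmt::Result {
    let mut p = PathBuilder { path: String::new() };
    write!(p, "core::option::Option")?;
    write!(p, "<{}>", "&'_ u32")?;
    assert_eq!(p.path, "core::option::Option<&'_ u32>");
    Ok(())
}
```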
+ write!(self, "'_") } fn print_type(&mut self, ty: Ty<'tcx>) -> Result<(), PrintError> { @@ -73,27 +76,26 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { self.pretty_print_dyn_existential(predicates) } - fn path_crate(&mut self, cnum: CrateNum) -> Result<(), PrintError> { + fn print_crate_name(&mut self, cnum: CrateNum) -> Result<(), PrintError> { self.path.push_str(self.tcx.crate_name(cnum).as_str()); Ok(()) } - fn path_qualified( + fn print_path_with_qualified( &mut self, self_ty: Ty<'tcx>, trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { - self.pretty_path_qualified(self_ty, trait_ref) + self.pretty_print_path_with_qualified(self_ty, trait_ref) } - fn path_append_impl( + fn print_path_with_impl( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, - _disambiguated_data: &DisambiguatedDefPathData, self_ty: Ty<'tcx>, trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { - self.pretty_path_append_impl( + self.pretty_print_path_with_impl( |cx| { print_prefix(cx)?; @@ -106,7 +108,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { ) } - fn path_append( + fn print_path_with_simple( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, disambiguated_data: &DisambiguatedDefPathData, @@ -118,38 +120,30 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { Ok(()) } - fn path_generic_args( + fn print_path_with_generic_args( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, args: &[GenericArg<'tcx>], ) -> Result<(), PrintError> { print_prefix(self)?; - let args = - args.iter().cloned().filter(|arg| !matches!(arg.kind(), GenericArgKind::Lifetime(_))); - if args.clone().next().is_some() { - self.generic_delimiters(|cx| cx.comma_sep(args)) + if !args.is_empty() { + self.generic_delimiters(|cx| cx.comma_sep(args.iter().copied())) } else { Ok(()) } } } -impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> { - fn should_print_region(&self, _region: ty::Region<'_>) -> bool { - false - } - fn comma_sep<T>(&mut self, mut elems: impl Iterator<Item = T>) -> Result<(), PrintError> - where - T: Print<'tcx, Self>, - { - if let Some(first) = elems.next() { - first.print(self)?; - for elem in elems { - self.path.push_str(", "); - elem.print(self)?; - } +impl<'tcx> PrettyPrinter<'tcx> for TypeNamePrinter<'tcx> { + fn should_print_optional_region(&self, _region: ty::Region<'_>) -> bool { + // Bound regions are always printed (as `'_`), which gives some idea that they are special, + // even though the `for` is omitted by the pretty printer. + // E.g. `for<'a, 'b> fn(&'a u32, &'b u32)` is printed as "fn(&'_ u32, &'_ u32)". + match _region.kind() { + ty::ReErased => false, + ty::ReBound(..) => true, + _ => unreachable!(), } - Ok(()) } fn generic_delimiters( @@ -171,7 +165,7 @@ impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> { } } -impl Write for AbsolutePathPrinter<'_> { +impl Write for TypeNamePrinter<'_> { fn write_str(&mut self, s: &str) -> std::fmt::Result { self.path.push_str(s); Ok(()) @@ -179,7 +173,7 @@ impl Write for AbsolutePathPrinter<'_> { } pub fn type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> String { - let mut printer = AbsolutePathPrinter { tcx, path: String::new() }; - printer.print_type(ty).unwrap(); - printer.path + let mut p = TypeNamePrinter { tcx, path: String::new() }; + p.print_type(ty).unwrap(); + p.path } |
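Taken together, the region-printing changes mean erased regions stay invisible while bound regions now show up as `'_`, as the comments in the diff describe. A hedged illustration via `core::any::type_name`, which this printer backs; the strings in the comments restate the documented behaviour rather than verified output:

```rust
fn main() {
    // Erased regions are not printed at all (`should_print_optional_region` returns false).
    println!("{}", std::any::type_name::<&u32>()); // e.g. "&u32"

    // Bound (higher-ranked) regions are printed as `'_`, with the `for<..>` binder omitted.
    println!("{}", std::any::type_name::<for<'a, 'b> fn(&'a u32, &'b u32)>()); // e.g. "fn(&'_ u32, &'_ u32)"
}
```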
