Diffstat (limited to 'compiler/rustc_const_eval/src')
18 files changed, 184 insertions, 322 deletions
diff --git a/compiler/rustc_const_eval/src/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs
index c1d91f98957..0eb6f28bdb3 100644
--- a/compiler/rustc_const_eval/src/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/check_consts/check.rs
@@ -638,14 +638,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 // These are all okay; they only change the type, not the data.
             }
 
-            Rvalue::Cast(
-                CastKind::PointerCoercion(PointerCoercion::Unsize | PointerCoercion::DynStar, _),
-                _,
-                _,
-            ) => {
-                // Unsizing and `dyn*` coercions are implemented for CTFE.
-            }
-
             Rvalue::Cast(CastKind::PointerExposeProvenance, _, _) => {
                 self.check_op(ops::RawPtrToIntCast);
             }
diff --git a/compiler/rustc_const_eval/src/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs
index d8421415225..9ab8e0692e1 100644
--- a/compiler/rustc_const_eval/src/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/check_consts/mod.rs
@@ -82,7 +82,7 @@ pub fn rustc_allow_const_fn_unstable(
 ) -> bool {
     let attrs = tcx.hir_attrs(tcx.local_def_id_to_hir_id(def_id));
 
-    attrs::find_attr!(attrs, attrs::AttributeKind::AllowConstFnUnstable(syms) if syms.contains(&feature_gate))
+    attrs::find_attr!(attrs, attrs::AttributeKind::AllowConstFnUnstable(syms, _) if syms.contains(&feature_gate))
 }
 
 /// Returns `true` if the given `def_id` (trait or function) is "safe to expose on stable".
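A note on the attribute matched above: `rustc_allow_const_fn_unstable` lets a const fn that is callable from stable code use unstable const features in its body; the added `_` in the pattern merely skips a newly added second field of the attribute's payload. A minimal sketch of how such an annotation looks on a library function (the feature names here are placeholders, not from this change):

    // Illustrative only: a stable-const fn opting in to one unstable feature.
    #[rustc_const_stable(feature = "my_feature", since = "1.0.0")]
    #[rustc_allow_const_fn_unstable(some_unstable_feature)]
    const fn stable_wrapper() -> u32 {
        // The body may rely on `some_unstable_feature` without exposing it.
        0
    }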
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 569a07c3a01..08e1877f0eb 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -20,7 +20,7 @@ use crate::const_eval::CheckAlignment;
 use crate::interpret::{
     CtfeValidationMode, GlobalId, Immediate, InternKind, InternResult, InterpCx, InterpErrorKind,
     InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup, create_static_alloc,
-    eval_nullary_intrinsic, intern_const_alloc_recursive, interp_ok, throw_exhaust,
+    intern_const_alloc_recursive, interp_ok, throw_exhaust,
 };
 use crate::{CTRL_C_RECEIVED, errors};
 
@@ -209,9 +209,9 @@ pub(super) fn op_to_const<'tcx>(
     match immediate {
         Left(ref mplace) => {
-            // We know `offset` is relative to the allocation, so we can use `into_parts`.
-            let (prov, offset) = mplace.ptr().into_parts();
-            let alloc_id = prov.expect("cannot have `fake` place for non-ZST type").alloc_id();
+            let (prov, offset) =
+                mplace.ptr().into_pointer_or_addr().unwrap().prov_and_relative_offset();
+            let alloc_id = prov.alloc_id();
             ConstValue::Indirect { alloc_id, offset }
         }
         // see comment on `let force_as_immediate` above
@@ -232,9 +232,10 @@ pub(super) fn op_to_const<'tcx>(
                 imm.layout.ty,
             );
             let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to the beginning of an actual allocation";
-            // We know `offset` is relative to the allocation, so we can use `into_parts`.
-            let (prov, offset) = a.to_pointer(ecx).expect(msg).into_parts();
-            let alloc_id = prov.expect(msg).alloc_id();
+            let ptr = a.to_pointer(ecx).expect(msg);
+            let (prov, offset) =
+                ptr.into_pointer_or_addr().expect(msg).prov_and_relative_offset();
+            let alloc_id = prov.alloc_id();
             let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
             assert!(offset == abi::Size::ZERO, "{}", msg);
             let meta = b.to_target_usize(ecx).expect(msg);
@@ -280,34 +281,6 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
     tcx: TyCtxt<'tcx>,
     key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>,
 ) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
-    // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
-    // Catch such calls and evaluate them instead of trying to load a constant's MIR.
-    if let ty::InstanceKind::Intrinsic(def_id) = key.value.instance.def {
-        let ty = key.value.instance.ty(tcx, key.typing_env);
-        let ty::FnDef(_, args) = ty.kind() else {
-            bug!("intrinsic with type {:?}", ty);
-        };
-        return eval_nullary_intrinsic(tcx, key.typing_env, def_id, args).report_err().map_err(
-            |error| {
-                let span = tcx.def_span(def_id);
-
-                // FIXME(oli-obk): why don't we have any tests for this code path?
-                super::report(
-                    tcx,
-                    error.into_kind(),
-                    span,
-                    || (span, vec![]),
-                    |diag, span, _| {
-                        diag.span_label(
-                            span,
-                            crate::fluent_generated::const_eval_nullary_intrinsic_fail,
-                        );
-                    },
-                )
-            },
-        );
-    }
-
     tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
 }
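The `op_to_const` change above swaps `into_parts` (which yielded an `Option` of provenance) for `into_pointer_or_addr` followed by `prov_and_relative_offset`, so the "has provenance" check happens once and the parts come back non-optional. A self-contained model of that API shape (illustrative types, not rustc's own):

    struct Prov(u64); // stands in for `CtfeProvenance`
    struct Pointer { prov: Option<Prov>, offset: u64 }
    struct PtrWithProv { prov: Prov, offset: u64 }

    impl Pointer {
        fn into_pointer_or_addr(self) -> Result<PtrWithProv, u64> {
            match self.prov {
                Some(prov) => Ok(PtrWithProv { prov, offset: self.offset }),
                None => Err(self.offset), // no provenance: just an address
            }
        }
    }

    impl PtrWithProv {
        fn prov_and_relative_offset(self) -> (Prov, u64) {
            (self.prov, self.offset)
        }
    }

    fn main() {
        let ptr = Pointer { prov: Some(Prov(7)), offset: 16 };
        let (prov, offset) = ptr.into_pointer_or_addr().unwrap().prov_and_relative_offset();
        assert_eq!((prov.0, offset), (7, 16)); // no `Option` left to `expect` on
    }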
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 317b1229a90..76fa744361a 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -10,7 +10,7 @@ use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
 use rustc_middle::mir::AssertMessage;
 use rustc_middle::mir::interpret::ReportedErrorInfo;
 use rustc_middle::query::TyCtxtAt;
-use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout};
+use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout, ValidityRequirement};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::{Span, Symbol, sym};
@@ -23,8 +23,8 @@ use crate::fluent_generated as fluent;
 use crate::interpret::{
     self, AllocId, AllocInit, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
     GlobalAlloc, ImmTy, InterpCx, InterpResult, OpTy, PlaceTy, Pointer, RangeSet, Scalar,
-    compile_time_machine, interp_ok, throw_exhaust, throw_inval, throw_ub, throw_ub_custom,
-    throw_unsup, throw_unsup_format,
+    compile_time_machine, err_inval, interp_ok, throw_exhaust, throw_inval, throw_ub,
+    throw_ub_custom, throw_unsup, throw_unsup_format,
 };
 
 /// When hitting this many interpreted terminators we emit a deny by default lint
@@ -462,6 +462,44 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
             // (We know the value here in the machine of course, but this is the runtime of that code,
             // not the optimization stage.)
             sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,
+
+            // We handle these here since Miri does not want to have them.
+            sym::assert_inhabited
+            | sym::assert_zero_valid
+            | sym::assert_mem_uninitialized_valid => {
+                let ty = instance.args.type_at(0);
+                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
+
+                let should_panic = !ecx
+                    .tcx
+                    .check_validity_requirement((requirement, ecx.typing_env().as_query_input(ty)))
+                    .map_err(|_| err_inval!(TooGeneric))?;
+
+                if should_panic {
+                    let layout = ecx.layout_of(ty)?;
+
+                    let msg = match requirement {
+                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
+                        // error message.
+                        _ if layout.is_uninhabited() => format!(
+                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
+                        ),
+                        ValidityRequirement::Inhabited => bug!("handled earlier"),
+                        ValidityRequirement::Zero => format!(
+                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
+                        ),
+                        ValidityRequirement::UninitMitigated0x01Fill => format!(
+                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
+                        ),
+                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
+                    };
+
+                    Self::panic_nounwind(ecx, &msg)?;
+                    // Skip the `return_to_block` at the end (we panicked, we do not return).
+                    return interp_ok(None);
+                }
+            }
+
             _ => {
                 // We haven't handled the intrinsic, let's see if we can use a fallback body.
                 if ecx.tcx.intrinsic(instance.def_id()).unwrap().must_be_overridden {
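These three intrinsics back `mem::zeroed` and friends; handling them in the CTFE machine keeps the panic behavior for compile-time evaluation while leaving Miri free to implement them its own way. A small runnable illustration of the user-visible behavior the new arm implements:

    use std::mem;

    fn main() {
        // `assert_zero_valid` fires inside `mem::zeroed` when the all-zero
        // bit pattern is invalid for the type, e.g.:
        // let r: &u8 = unsafe { mem::zeroed() }; // panics: "attempted to
        // zero-initialize type `&u8`, which is invalid"

        // Where zero is a valid value, it is a no-op:
        let x: usize = unsafe { mem::zeroed() };
        assert_eq!(x, 0);
    }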
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index d95d552d7d5..0082f90f3b8 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,9 +1,9 @@
 // Not in interpret to make sure we do not use private implementation details
 use rustc_abi::{FieldIdx, VariantIdx};
-use rustc_middle::query::Key;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
+use rustc_span::DUMMY_SP;
 use tracing::instrument;
 
 use crate::interpret::InterpCx;
@@ -71,8 +71,7 @@ pub fn tag_for_variant_provider<'tcx>(
     let (ty, variant_index) = key.value;
     assert!(ty.is_enum());
 
-    let ecx =
-        InterpCx::new(tcx, ty.default_span(tcx), key.typing_env, crate::const_eval::DummyMachine);
+    let ecx = InterpCx::new(tcx, DUMMY_SP, key.typing_env, crate::const_eval::DummyMachine);
 
     let layout = ecx.layout_of(ty).unwrap();
     ecx.tag_for_variant(layout, variant_index).unwrap().map(|(tag, _tag_field)| tag)
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 14abdd8c98c..9133a5fc8ef 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -574,7 +574,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
                 if addr != 0 {
                     diag.arg(
                         "pointer",
-                        Pointer::<Option<CtfeProvenance>>::from_addr_invalid(addr).to_string(),
+                        Pointer::<Option<CtfeProvenance>>::without_provenance(addr).to_string(),
                     );
                 }
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index 79c14b204e3..ebaa5a97a4a 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -643,13 +643,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     break self.ref_to_mplace(&val)?;
                 }
                 ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
-                ty::Dynamic(.., ty::DynStar) => {
-                    // Not clear how to handle this, so far we assume the receiver is always a pointer.
-                    span_bug!(
-                        self.cur_span(),
-                        "by-value calls on a `dyn*`... are those a thing?"
-                    );
-                }
                 _ => {
                     // Not there yet, search for the only non-ZST field.
                     // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
@@ -662,39 +655,23 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             };
 
             // Obtain the underlying trait we are working on, and the adjusted receiver argument.
-            let (trait_, dyn_ty, adjusted_recv) = if let ty::Dynamic(data, _, ty::DynStar) =
-                receiver_place.layout.ty.kind()
-            {
-                let recv = self.unpack_dyn_star(&receiver_place, data)?;
-
-                (data.principal(), recv.layout.ty, recv.ptr())
-            } else {
-                // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
-                // (For that reason we also cannot use `unpack_dyn_trait`.)
-                let receiver_tail =
-                    self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.typing_env);
-                let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else {
-                    span_bug!(
-                        self.cur_span(),
-                        "dynamic call on non-`dyn` type {}",
-                        receiver_tail
-                    )
-                };
-                assert!(receiver_place.layout.is_unsized());
-
-                // Get the required information from the vtable.
-                let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
-                let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?;
-
-                // It might be surprising that we use a pointer as the receiver even if this
-                // is a by-val case; this works because by-val passing of an unsized `dyn
-                // Trait` to a function is actually desugared to a pointer.
-                (receiver_trait.principal(), dyn_ty, receiver_place.ptr())
+            // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
+            // (For that reason we also cannot use `unpack_dyn_trait`.)
+            let receiver_tail =
+                self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.typing_env);
+            let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else {
+                span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
             };
+            assert!(receiver_place.layout.is_unsized());
+
+            // Get the required information from the vtable.
+            let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
+            let dyn_ty = self.get_ptr_vtable_ty(vptr, Some(receiver_trait))?;
+            let adjusted_recv = receiver_place.ptr();
 
             // Now determine the actual method to call. Usually we use the easy way of just
             // looking up the method at index `idx`.
-            let vtable_entries = self.vtable_entries(trait_, dyn_ty);
+            let vtable_entries = self.vtable_entries(receiver_trait.principal(), dyn_ty);
             let Some(ty::VtblEntry::Method(fn_inst)) = vtable_entries.get(idx).copied() else {
                 // FIXME(fee1-dead) these could be variants of the UB info enum instead of this
                 throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
@@ -830,10 +807,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 // Dropping a trait object. Need to find actual drop fn.
                 self.unpack_dyn_trait(&place, data)?
             }
-            ty::Dynamic(data, _, ty::DynStar) => {
-                // Dropping a `dyn*`. Need to find actual drop fn.
-                self.unpack_dyn_star(&place, data)?
-            }
             _ => {
                 debug_assert_eq!(
                     instance,
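With the `dyn*` branches gone, every dynamic call goes through a wide pointer whose metadata is the vtable, exactly as the surviving path above reads it. A quick runnable reminder of that layout (plain Rust, nothing rustc-internal):

    trait Trait {}
    impl Trait for u8 {}

    fn main() {
        // A `&dyn Trait` is (data pointer, vtable pointer); the interpreter
        // pulls the vtable out of the place metadata.
        let x = 0u8;
        let wide: &dyn Trait = &x;
        assert_eq!(
            std::mem::size_of_val(&wide), // the wide pointer itself
            2 * std::mem::size_of::<*const u8>()
        );
    }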
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 1036935bb10..7a73d70fc85 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -126,20 +126,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 }
             }
 
-            CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
-                if let ty::Dynamic(data, _, ty::DynStar) = cast_ty.kind() {
-                    // Initial cast from sized to dyn trait
-                    let vtable = self.get_vtable_ptr(src.layout.ty, data)?;
-                    let vtable = Scalar::from_maybe_pointer(vtable, self);
-                    let data = self.read_immediate(src)?.to_scalar();
-                    let _assert_pointer_like = data.to_pointer(self)?;
-                    let val = Immediate::ScalarPair(data, vtable);
-                    self.write_immediate(val, dest)?;
-                } else {
-                    bug!()
-                }
-            }
-
             CastKind::Transmute => {
                 assert!(src.layout.is_sized());
                 assert!(dest.layout.is_sized());
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index b29c5c7c7d7..378ed6d0e10 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -6,10 +6,9 @@ use std::assert_matches::assert_matches;
 use rustc_abi::Size;
 use rustc_apfloat::ieee::{Double, Half, Quad, Single};
-use rustc_hir::def_id::DefId;
 use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
-use rustc_middle::ty::layout::{TyAndLayout, ValidityRequirement};
-use rustc_middle::ty::{GenericArgsRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_middle::{bug, ty};
 use rustc_span::{Symbol, sym};
 use tracing::trace;
@@ -17,9 +16,9 @@ use tracing::trace;
 use super::memory::MemoryKind;
 use super::util::ensure_monomorphic_enough;
 use super::{
-    Allocation, CheckInAllocMsg, ConstAllocation, GlobalId, ImmTy, InterpCx, InterpResult, Machine,
-    OpTy, PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar, err_inval, err_ub_custom,
-    err_unsup_format, interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
+    Allocation, CheckInAllocMsg, ConstAllocation, ImmTy, InterpCx, InterpResult, Machine, OpTy,
+    PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format,
+    interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
 };
 use crate::fluent_generated as fluent;
 
@@ -30,73 +29,6 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll
     tcx.mk_const_alloc(alloc)
 }
 
-/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
-/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
-pub(crate) fn eval_nullary_intrinsic<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    typing_env: ty::TypingEnv<'tcx>,
-    def_id: DefId,
-    args: GenericArgsRef<'tcx>,
-) -> InterpResult<'tcx, ConstValue<'tcx>> {
-    let tp_ty = args.type_at(0);
-    let name = tcx.item_name(def_id);
-    interp_ok(match name {
-        sym::type_name => {
-            ensure_monomorphic_enough(tcx, tp_ty)?;
-            let alloc = alloc_type_name(tcx, tp_ty);
-            ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
-        }
-        sym::needs_drop => {
-            ensure_monomorphic_enough(tcx, tp_ty)?;
-            ConstValue::from_bool(tp_ty.needs_drop(tcx, typing_env))
-        }
-        sym::type_id => {
-            ensure_monomorphic_enough(tcx, tp_ty)?;
-            ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
-        }
-        sym::variant_count => match match tp_ty.kind() {
-            // Pattern types have the same number of variants as their base type.
-            // Even if we restrict e.g. which variants are valid, the variants are essentially just uninhabited.
-            // And `Result<(), !>` still has two variants according to `variant_count`.
-            ty::Pat(base, _) => *base,
-            _ => tp_ty,
-        }
-        .kind()
-        {
-            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
-            ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
-            ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
-                throw_inval!(TooGeneric)
-            }
-            ty::Pat(..) => unreachable!(),
-            ty::Bound(_, _) => bug!("bound ty during ctfe"),
-            ty::Bool
-            | ty::Char
-            | ty::Int(_)
-            | ty::Uint(_)
-            | ty::Float(_)
-            | ty::Foreign(_)
-            | ty::Str
-            | ty::Array(_, _)
-            | ty::Slice(_)
-            | ty::RawPtr(_, _)
-            | ty::Ref(_, _, _)
-            | ty::FnDef(_, _)
-            | ty::FnPtr(..)
-            | ty::Dynamic(_, _, _)
-            | ty::Closure(_, _)
-            | ty::CoroutineClosure(_, _)
-            | ty::Coroutine(_, _)
-            | ty::CoroutineWitness(..)
-            | ty::UnsafeBinder(_)
-            | ty::Never
-            | ty::Tuple(_)
-            | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
-        },
-        other => bug!("`{}` is not a zero arg intrinsic", other),
-    })
-}
-
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Returns `true` if emulation happened.
     /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
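The removed evaluator's four intrinsics are all reachable from stable library functions, which after this change are interpreted like any other intrinsic instead of through a dedicated const-eval query path. For orientation (the exact `type_name` output is unspecified by the library docs):

    fn main() {
        // `type_name` and `needs_drop` are stable fronts for two of them.
        assert!(std::any::type_name::<Option<u8>>().contains("Option<u8>"));
        assert!(std::mem::needs_drop::<String>());
        assert!(!std::mem::needs_drop::<u64>());
    }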
@@ -110,8 +42,77 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ) -> InterpResult<'tcx, bool> {
         let instance_args = instance.args;
         let intrinsic_name = self.tcx.item_name(instance.def_id());
+        let tcx = self.tcx.tcx;
 
         match intrinsic_name {
+            sym::type_name => {
+                let tp_ty = instance.args.type_at(0);
+                ensure_monomorphic_enough(tcx, tp_ty)?;
+                let alloc = alloc_type_name(tcx, tp_ty);
+                let val = ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() };
+                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+            sym::needs_drop => {
+                let tp_ty = instance.args.type_at(0);
+                ensure_monomorphic_enough(tcx, tp_ty)?;
+                let val = ConstValue::from_bool(tp_ty.needs_drop(tcx, self.typing_env));
+                let val = self.const_val_to_op(val, tcx.types.bool, Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+            sym::type_id => {
+                let tp_ty = instance.args.type_at(0);
+                ensure_monomorphic_enough(tcx, tp_ty)?;
+                let val = ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128());
+                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+            sym::variant_count => {
+                let tp_ty = instance.args.type_at(0);
+                let ty = match tp_ty.kind() {
+                    // Pattern types have the same number of variants as their base type.
+                    // Even if we restrict e.g. which variants are valid, the variants are essentially just uninhabited.
+                    // And `Result<(), !>` still has two variants according to `variant_count`.
+                    ty::Pat(base, _) => *base,
+                    _ => tp_ty,
+                };
+                let val = match ty.kind() {
+                    // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
+                    ty::Adt(adt, _) => {
+                        ConstValue::from_target_usize(adt.variants().len() as u64, &tcx)
+                    }
+                    ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
+                        throw_inval!(TooGeneric)
+                    }
+                    ty::Pat(..) => unreachable!(),
+                    ty::Bound(_, _) => bug!("bound ty during ctfe"),
+                    ty::Bool
+                    | ty::Char
+                    | ty::Int(_)
+                    | ty::Uint(_)
+                    | ty::Float(_)
+                    | ty::Foreign(_)
+                    | ty::Str
+                    | ty::Array(_, _)
+                    | ty::Slice(_)
+                    | ty::RawPtr(_, _)
+                    | ty::Ref(_, _, _)
+                    | ty::FnDef(_, _)
+                    | ty::FnPtr(..)
+                    | ty::Dynamic(_, _, _)
+                    | ty::Closure(_, _)
+                    | ty::CoroutineClosure(_, _)
+                    | ty::Coroutine(_, _)
+                    | ty::CoroutineWitness(..)
+                    | ty::UnsafeBinder(_)
+                    | ty::Never
+                    | ty::Tuple(_)
+                    | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
+                };
+                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+
             sym::caller_location => {
                 let span = self.find_closest_untracked_caller_location();
                 let val = self.tcx.span_as_caller_location(span);
@@ -137,21 +138,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
             }
 
-            sym::needs_drop | sym::type_id | sym::type_name | sym::variant_count => {
-                let gid = GlobalId { instance, promoted: None };
-                let ty = self
-                    .tcx
-                    .fn_sig(instance.def_id())
-                    .instantiate(self.tcx.tcx, instance.args)
-                    .output()
-                    .no_bound_vars()
-                    .unwrap();
-                let val = self
-                    .ctfe_query(|tcx| tcx.const_eval_global_id(self.typing_env, gid, tcx.span))?;
-                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
-                self.copy_op(&val, dest)?;
-            }
-
             sym::fadd_algebraic
             | sym::fsub_algebraic
             | sym::fmul_algebraic
@@ -386,41 +372,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 self.exact_div(&val, &size, dest)?;
             }
 
-            sym::assert_inhabited
-            | sym::assert_zero_valid
-            | sym::assert_mem_uninitialized_valid => {
-                let ty = instance.args.type_at(0);
-                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
-
-                let should_panic = !self
-                    .tcx
-                    .check_validity_requirement((requirement, self.typing_env.as_query_input(ty)))
-                    .map_err(|_| err_inval!(TooGeneric))?;
-
-                if should_panic {
-                    let layout = self.layout_of(ty)?;
-
-                    let msg = match requirement {
-                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
-                        // error message.
-                        _ if layout.is_uninhabited() => format!(
-                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
-                        ),
-                        ValidityRequirement::Inhabited => bug!("handled earlier"),
-                        ValidityRequirement::Zero => format!(
-                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
-                        ),
-                        ValidityRequirement::UninitMitigated0x01Fill => format!(
-                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
-                        ),
-                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
-                    };
-
-                    M::panic_nounwind(self, &msg)?;
-                    // Skip the `return_to_block` at the end (we panicked, we do not return).
-                    return interp_ok(true);
-                }
-            }
-
             sym::simd_insert => {
                 let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                 let elem = &args[2];
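The `variant_count` arm is observable through the nightly-only `std::mem::variant_count`; a sketch of the behavior the comments above describe (requires the `variant_count` and `never_type` features):

    #![feature(variant_count, never_type)]
    use std::mem;

    fn main() {
        assert_eq!(mem::variant_count::<Option<u32>>(), 2);
        // `Result<(), !>` still has two variants even though `Err` is
        // uninhabited, matching the comment in the hunk above.
        assert_eq!(mem::variant_count::<Result<(), !>>(), 2);
    }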
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index d6d230fbd17..35ec303f961 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -747,7 +747,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
         // Allow these casts, but make the pointer not dereferenceable.
         // (I.e., they behave like transmutation.)
         // This is correct because no pointers can ever be exposed in compile-time evaluation.
-        interp_ok(Pointer::from_addr_invalid(addr))
+        interp_ok(Pointer::without_provenance(addr))
     }
 
     #[inline(always)]
@@ -756,8 +756,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
         ptr: Pointer<CtfeProvenance>,
         _size: i64,
     ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
-        // We know `offset` is relative to the allocation, so we can use `into_parts`.
-        let (prov, offset) = ptr.into_parts();
+        let (prov, offset) = ptr.prov_and_relative_offset();
         Some((prov.alloc_id(), offset, prov.immutable()))
     }
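`Pointer::without_provenance` mirrors the strict-provenance API that is now stable in the standard library: an integer-to-pointer cast in CTFE produces an address-only pointer that can be compared and cast back but not dereferenced. The same idea in user code:

    use std::ptr;

    fn main() {
        // Just an address, no provenance, hence not dereferenceable.
        let p: *const u8 = ptr::without_provenance(0x1000);
        assert_eq!(p.addr(), 0x1000);
        // Reading through `p` would be UB; CTFE rejects it for the same reason.
    }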
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 69fceb02ff9..c97d53a45de 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -537,7 +537,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     #[inline]
     fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
-        if offset % align.bytes() == 0 {
+        if offset.is_multiple_of(align.bytes()) {
             None
         } else {
             // The biggest power of two through which `offset` is divisible.
@@ -655,7 +655,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// The caller is responsible for calling the access hooks!
     ///
     /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
-    fn get_alloc_raw(
+    pub fn get_alloc_raw(
         &self,
         id: AllocId,
     ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
@@ -757,7 +757,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ///
     /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
     /// allocation.
-    fn get_alloc_raw_mut(
+    ///
+    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
+    pub fn get_alloc_raw_mut(
         &mut self,
         id: AllocId,
     ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
@@ -976,15 +978,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         interp_ok(())
     }
 
-    /// Handle the effect an FFI call might have on the state of allocations.
-    /// This overapproximates the modifications which external code might make to memory:
-    /// We set all reachable allocations as initialized, mark all reachable provenances as exposed
-    /// and overwrite them with `Provenance::WILDCARD`.
-    ///
-    /// The allocations in `ids` are assumed to be already exposed.
-    pub fn prepare_for_native_call(&mut self, ids: Vec<AllocId>) -> InterpResult<'tcx> {
+    /// Visit all allocations reachable from the given start set, by recursively traversing the
+    /// provenance information of those allocations.
+    pub fn visit_reachable_allocs(
+        &mut self,
+        start: Vec<AllocId>,
+        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
+    ) -> InterpResult<'tcx> {
         let mut done = FxHashSet::default();
-        let mut todo = ids;
+        let mut todo = start;
         while let Some(id) = todo.pop() {
             if !done.insert(id) {
                 // We already saw this allocation before, don't process it again.
@@ -992,31 +994,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             }
             let info = self.get_alloc_info(id);
 
-            // If there is no data behind this pointer, skip this.
-            if !matches!(info.kind, AllocKind::LiveData) {
-                continue;
-            }
-
-            // Expose all provenances in this allocation, and add them to `todo`.
-            let alloc = self.get_alloc_raw(id)?;
-            for prov in alloc.provenance().provenances() {
-                M::expose_provenance(self, prov)?;
-                if let Some(id) = prov.get_alloc_id() {
-                    todo.push(id);
+            // Recurse, if there is data here.
+            // Do this *before* invoking the callback, as the callback might mutate the
+            // allocation and e.g. replace all provenance by wildcards!
+            if matches!(info.kind, AllocKind::LiveData) {
+                let alloc = self.get_alloc_raw(id)?;
+                for prov in alloc.provenance().provenances() {
+                    if let Some(id) = prov.get_alloc_id() {
+                        todo.push(id);
+                    }
                 }
             }
-            // Also expose the provenance of the interpreter-level allocation, so it can
-            // be read by FFI. The `black_box` is defensive programming as LLVM likes
-            // to (incorrectly) optimize away ptr2int casts whose result is unused.
-            std::hint::black_box(alloc.get_bytes_unchecked_raw().expose_provenance());
-
-            // Prepare for possible write from native code if mutable.
-            if info.mutbl.is_mut() {
-                self.get_alloc_raw_mut(id)?
-                    .0
-                    .prepare_for_native_write()
-                    .map_err(|e| e.to_interp_error(id))?;
-            }
+
+            // Call the callback.
+            visit(self, id, &info)?;
         }
         interp_ok(())
     }
@@ -1073,7 +1064,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         todo.extend(static_roots(self));
         while let Some(id) = todo.pop() {
             if reachable.insert(id) {
-                // This is a new allocation, add the allocation it points to `todo`.
+                // This is a new allocation, add the allocations it points to `todo`.
+                // We only need to care about `alloc_map` memory here, as entirely unchanged
+                // global memory cannot point to memory relevant for the leak check.
                 if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                     todo.extend(
                         alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
@@ -1561,7 +1554,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // If the allocation is N-aligned, and the offset is not divisible by N,
         // then `base + offset` has a non-zero remainder after division by `N`,
         // which means `base + offset` cannot be null.
-        if offset.bytes() % info.align.bytes() != 0 {
+        if !offset.bytes().is_multiple_of(info.align.bytes()) {
             return interp_ok(false);
         }
         // We don't know enough, this might be null.
@@ -1596,7 +1589,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
             None => {
                 assert!(M::Provenance::OFFSET_IS_ADDR);
-                let (_, addr) = ptr.into_parts();
+                // Offset is absolute, as we just asserted.
+                let (_, addr) = ptr.into_raw_parts();
                 Err(addr.bytes())
             }
         },
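`visit_reachable_allocs` factors the old FFI preparation into a generic worklist traversal over the provenance graph, with the policy (exposing provenance, marking memory as initialized) left to the caller's closure. A self-contained model of the traversal order, including the recurse-before-callback detail:

    use std::collections::{HashMap, HashSet};

    // Model: alloc id -> ids its provenance points to.
    fn visit_reachable(
        edges: &HashMap<u32, Vec<u32>>,
        start: Vec<u32>,
        mut visit: impl FnMut(u32),
    ) {
        let mut done = HashSet::new();
        let mut todo = start;
        while let Some(id) = todo.pop() {
            if !done.insert(id) {
                continue; // already visited
            }
            // Collect outgoing edges *before* the callback runs, since the
            // real callback may overwrite provenance (e.g. with wildcards).
            if let Some(succs) = edges.get(&id) {
                todo.extend(succs);
            }
            visit(id);
        }
    }

    fn main() {
        let edges = HashMap::from([(1, vec![2]), (2, vec![1, 3])]);
        let mut seen = Vec::new();
        visit_reachable(&edges, vec![1], |id| seen.push(id));
        seen.sort();
        assert_eq!(seen, vec![1, 2, 3]);
    }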
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index f5792aba207..f8b3c92debb 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -29,7 +29,6 @@ pub use self::intern::{
     HasStaticRootDefId, InternKind, InternResult, intern_const_alloc_for_constprop,
     intern_const_alloc_recursive,
 };
-pub(crate) use self::intrinsics::eval_nullary_intrinsic;
 pub use self::machine::{AllocMap, Machine, MayLeak, ReturnAction, compile_time_machine};
 pub use self::memory::{AllocInfo, AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
 use self::operand::Operand;
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 3028568dd8f..e2284729efd 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -118,7 +118,7 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
     pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
         assert!(layout.is_zst());
         let align = layout.align.abi;
-        let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
+        let ptr = Pointer::without_provenance(align.bytes()); // no provenance, absolute address
         MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None, misaligned: None }, layout }
     }
 
@@ -572,7 +572,7 @@ where
             Right((local, offset, locals_addr, layout)) => {
                 if offset.is_some() {
                     // This has been projected to a part of this local, or had the type changed.
-                    // FIMXE: there are cases where we could still avoid allocating an mplace.
+                    // FIXME: there are cases where we could still avoid allocating an mplace.
                     Left(place.force_mplace(self)?)
                 } else {
                     debug_assert_eq!(locals_addr, self.frame().locals_addr());
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
index 3361a586b8e..543d68d7f45 100644
--- a/compiler/rustc_const_eval/src/interpret/stack.rs
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -499,8 +499,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             | ty::Closure(..)
             | ty::CoroutineClosure(..)
            | ty::Never
-            | ty::Error(_)
-            | ty::Dynamic(_, _, ty::DynStar) => true,
+            | ty::Error(_) => true,
 
             ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false,
 
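The `fake_alloc_zst` call site above relies on the fact that a ZST place needs no storage, only a suitably aligned, provenance-free address. The standard library exposes the same trick as `NonNull::dangling`:

    use std::ptr::NonNull;

    fn main() {
        let p: NonNull<()> = NonNull::dangling();
        // The dangling address is exactly the type's alignment.
        assert_eq!(p.addr().get(), std::mem::align_of::<()>());
        let _unit: &() = unsafe { p.as_ref() }; // fine for a zero-sized type
    }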
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index 8b634955bb7..e4b5c82853a 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -1,4 +1,4 @@
-use rustc_abi::{Align, FieldIdx, Size};
+use rustc_abi::{Align, Size};
 use rustc_middle::mir::interpret::{InterpResult, Pointer};
 use rustc_middle::ty::{self, ExistentialPredicateStableCmpExt, Ty, TyCtxt, VtblEntry};
 use tracing::trace;
@@ -125,24 +125,4 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         )?;
         interp_ok(mplace)
     }
-
-    /// Turn a `dyn* Trait` type into an value with the actual dynamic type.
-    pub(super) fn unpack_dyn_star<P: Projectable<'tcx, M::Provenance>>(
-        &self,
-        val: &P,
-        expected_trait: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
-    ) -> InterpResult<'tcx, P> {
-        assert!(
-            matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
-            "`unpack_dyn_star` only makes sense on `dyn*` types"
-        );
-        let data = self.project_field(val, FieldIdx::ZERO)?;
-        let vtable = self.project_field(val, FieldIdx::ONE)?;
-        let vtable = self.read_pointer(&vtable.to_op(self)?)?;
-        let ty = self.get_ptr_vtable_ty(vtable, Some(expected_trait))?;
-        // `data` is already the right thing but has the wrong type. So we transmute it.
-        let layout = self.layout_of(ty)?;
-        let data = data.transmute(layout, self)?;
-        interp_ok(data)
-    }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 998ef3729ea..fc4d13af8c4 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -358,9 +358,6 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
             // arrays/slices
             ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
 
-            // dyn* vtables
-            ty::Dynamic(_, _, ty::DynKind::DynStar) if field == 1 => PathElem::Vtable,
-
             // dyn traits
             ty::Dynamic(..) => {
                 assert_eq!(field, 0);
@@ -518,7 +515,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 Ub(DanglingIntPointer { addr: i, .. }) => DanglingPtrNoProvenance {
                     ptr_kind,
                     // FIXME this says "null pointer" when null but we need translate
-                    pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
+                    pointer: format!("{}", Pointer::<Option<AllocId>>::without_provenance(i))
                 },
                 Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
                     ptr_kind
@@ -868,7 +865,9 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     fn add_data_range(&mut self, ptr: Pointer<Option<M::Provenance>>, size: Size) {
         if let Some(data_bytes) = self.data_bytes.as_mut() {
             // We only have to store the offset, the rest is the same for all pointers here.
-            let (_prov, offset) = ptr.into_parts();
+            // The logic is agnostic to whether the offset is relative or absolute as long as
+            // it is consistent.
+            let (_prov, offset) = ptr.into_raw_parts();
             // Add this.
             data_bytes.add_range(offset, size);
         };
@@ -894,7 +893,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
             .as_mplace_or_imm()
             .expect_left("place must be in memory")
             .ptr();
-        let (_prov, offset) = ptr.into_parts();
+        let (_prov, offset) = ptr.into_raw_parts();
         offset
     }
 
@@ -903,7 +902,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         // Our value must be in memory, otherwise we would not have set up `data_bytes`.
         let mplace = self.ecx.force_allocation(place)?;
         // Determine starting offset and size.
-        let (_prov, start_offset) = mplace.ptr().into_parts();
+        let (_prov, start_offset) = mplace.ptr().into_raw_parts();
         let (size, _align) = self
             .ecx
             .size_and_align_of_val(&mplace)?
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index d5970b69baf..a27b6646131 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -101,26 +101,6 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
                 // recurse with the inner type
                 return self.visit_field(v, 0, &inner_mplace.into());
             }
-            ty::Dynamic(data, _, ty::DynStar) => {
-                // DynStar types. Very different from a dyn type (but strangely part of the
-                // same variant in `TyKind`): These are pairs where the 2nd component is the
-                // vtable, and the first component is the data (which must be ptr-sized).
-
-                // First make sure the vtable can be read at its type.
-                // The type of this vtable is fake, it claims to be a reference to some actual memory but that isn't true.
-                // So we transmute it to a raw pointer.
-                let raw_ptr_ty = Ty::new_mut_ptr(*self.ecx().tcx, self.ecx().tcx.types.unit);
-                let raw_ptr_ty = self.ecx().layout_of(raw_ptr_ty)?;
-                let vtable_field = self
-                    .ecx()
-                    .project_field(v, FieldIdx::ONE)?
-                    .transmute(raw_ptr_ty, self.ecx())?;
-                self.visit_field(v, 1, &vtable_field)?;
-
-                // Then unpack the first field, and continue.
-                let data = self.ecx().unpack_dyn_star(v, data)?;
-                return self.visit_field(v, 0, &data);
-            }
             // Slices do not need special handling here: they have `Array` field
             // placement with length 0, so we enter the `Array` case below which
             // indirectly uses the metadata to determine the actual length.
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index d7e97f32bae..4ca39bbc68e 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -69,7 +69,7 @@ fn check_validity_requirement_strict<'tcx>(
     // require dereferenceability also require non-null, we don't actually get any false negatives
     // due to this.
     // The value we are validating is temporary and discarded at the end of this function, so
-    // there is no point in reseting provenance and padding.
+    // there is no point in resetting provenance and padding.
     cx.validate_operand(
         &allocated.into(),
         /*recursive*/ false,
        |
