| | | |
|---|---|---|
| author | bors <bors@rust-lang.org> | 2022-12-24 09:41:11 +0000 |
| committer | bors <bors@rust-lang.org> | 2022-12-24 09:41:11 +0000 |
| commit | d23554fae855d884761d549cd6ee6537450b0f3c | |
| tree | b44bd3e79fa5019f0e8648737e71c432bc784188 | /compiler/rustc_const_eval/src |
| parent | 245357f61939d2b6d15f8c6b15f7026396f95871 | |
| parent | e52e0d855799fe651922be4a038fe84fe9009c72 | |
Auto merge of #2738 - RalfJung:rustup, r=RalfJung
Rustup
Diffstat (limited to 'compiler/rustc_const_eval/src')
8 files changed, 50 insertions, 26 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index b1fdeb01b10..986b6d65530 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -332,7 +332,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
                 self.write_immediate(val, dest)
             }
-            (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+            (ty::Dynamic(data_a, ..), ty::Dynamic(data_b, ..)) => {
                 let val = self.read_immediate(src)?;
                 if data_a.principal() == data_b.principal() {
                     // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 9b56757eb39..666fcbd6f80 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -303,7 +303,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::offset => {
                 let ptr = self.read_pointer(&args[0])?;
-                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let offset_count = self.read_machine_isize(&args[1])?;
                 let pointee_ty = substs.type_at(0);
 
                 let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
@@ -311,7 +311,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::arith_offset => {
                 let ptr = self.read_pointer(&args[0])?;
-                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let offset_count = self.read_machine_isize(&args[1])?;
                 let pointee_ty = substs.type_at(0);
 
                 let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -428,7 +428,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             sym::transmute => {
                 self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
             }
-            sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+            sym::assert_inhabited
+            | sym::assert_zero_valid
+            | sym::assert_mem_uninitialized_valid => {
                 let ty = instance.substs.type_at(0);
                 let layout = self.layout_of(ty)?;
 
@@ -460,7 +462,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     }
                 }
 
-                if intrinsic_name == sym::assert_uninit_valid {
+                if intrinsic_name == sym::assert_mem_uninitialized_valid {
                     let should_panic = !self.tcx.permits_uninit_init(layout);
 
                     if should_panic {
@@ -668,7 +670,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let count = self.read_machine_usize(&count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
         // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
@@ -696,7 +698,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         let dst = self.read_pointer(&dst)?;
         let byte = self.read_scalar(&byte)?.to_u8()?;
-        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let count = self.read_machine_usize(&count)?;
 
         // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 221e359d24a..fcc6f8ea852 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -39,7 +39,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
 impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
     #[inline(always)]
     fn from(val: Scalar<Prov>) -> Self {
-        Immediate::Scalar(val.into())
+        Immediate::Scalar(val)
     }
 }
 
@@ -53,7 +53,7 @@ impl<Prov: Provenance> Immediate<Prov> {
     }
 
     pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
-        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
+        Immediate::ScalarPair(val, Scalar::from_machine_usize(len, cx))
     }
 
     pub fn new_dyn_trait(
@@ -61,7 +61,7 @@ impl<Prov: Provenance> Immediate<Prov> {
         vtable: Pointer<Option<Prov>>,
         cx: &impl HasDataLayout,
     ) -> Self {
-        Immediate::ScalarPair(val.into(), Scalar::from_maybe_pointer(vtable, cx))
+        Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
     }
 
     #[inline]
@@ -341,10 +341,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     alloc_range(b_offset, b_size),
                     /*read_provenance*/ b.is_ptr(),
                 )?;
-                Some(ImmTy {
-                    imm: Immediate::ScalarPair(a_val.into(), b_val.into()),
-                    layout: mplace.layout,
-                })
+                Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
             }
             _ => {
                 // Neither a scalar nor scalar pair.
@@ -407,6 +404,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(self.read_immediate(op)?.to_scalar())
     }
 
+    // Pointer-sized reads are fairly common and need target layout access, so we wrap them in
+    // convenience functions.
+
     /// Read a pointer from a place.
     pub fn read_pointer(
         &self,
@@ -414,6 +414,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
         self.read_scalar(op)?.to_pointer(self)
     }
+    /// Read a pointer-sized unsigned integer from a place.
+    pub fn read_machine_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+        self.read_scalar(op)?.to_machine_usize(self)
+    }
+    /// Read a pointer-sized signed integer from a place.
+    pub fn read_machine_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+        self.read_scalar(op)?.to_machine_isize(self)
+    }
 
     /// Turn the wide MPlace into a string (must already be dereferenced!)
     pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
@@ -569,8 +577,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ty::ConstKind::Unevaluated(uv) => {
                 let instance = self.resolve(uv.def, uv.substs)?;
                 let cid = GlobalId { instance, promoted: None };
-                self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
-                    .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+                self.ctfe_query(span, |tcx| {
+                    tcx.eval_to_valtree(self.param_env.with_const().and(cid))
+                })?
+                .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
             }
             ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
                 span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 949f95c5fa8..e8ff70e3a40 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -36,7 +36,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         if let Abi::ScalarPair(..) = dest.layout.abi {
             // We can use the optimized path and avoid `place_field` (which might do
             // `force_allocation`).
-            let pair = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+            let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
             self.write_immediate(pair, dest)?;
         } else {
             assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 905eb71bb18..97a73e98abc 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -141,7 +141,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
         match self.meta {
             MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
             MemPlaceMeta::Meta(meta) => {
-                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx), meta)
             }
         }
     }
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 2ffd73eef3e..291464ab58a 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -363,7 +363,7 @@ where
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                let n = self.read_machine_usize(&n)?;
                 self.place_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
@@ -392,7 +392,7 @@ where
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                let n = self.read_machine_usize(&n)?;
                 self.operand_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 0e7ffcdffc9..550c7a44c41 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -119,11 +119,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Drop { place, target, unwind } => {
+                let frame = self.frame();
+                let ty = place.ty(&frame.body.local_decls, *self.tcx).ty;
+                let ty = self.subst_from_frame_and_normalize_erasing_regions(frame, ty)?;
+                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                if let ty::InstanceDef::DropGlue(_, None) = instance.def {
+                    // This is the branch we enter if and only if the dropped type has no drop glue
+                    // whatsoever. This can happen as a result of monomorphizing a drop of a
+                    // generic. In order to make sure that generic and non-generic code behaves
+                    // roughly the same (and in keeping with Mir semantics) we do nothing here.
+                    self.go_to_block(target);
+                    return Ok(());
+                }
                 let place = self.eval_place(place)?;
-                let ty = place.layout.ty;
                 trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
-
-                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                 self.drop_in_place(&place, instance, target, unwind)?;
             }
 
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index bb897b95b2c..94e1b95a0eb 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -9,8 +9,8 @@ use rustc_middle::mir::visit::{PlaceContext, Visitor};
 use rustc_middle::mir::{
     traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping,
     Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef,
-    ProjectionElem, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
-    TerminatorKind, UnOp, START_BLOCK,
+    ProjectionElem, RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind,
+    Terminator, TerminatorKind, UnOp, START_BLOCK,
 };
 use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitable};
 use rustc_mir_dataflow::impls::MaybeStorageLive;
@@ -667,10 +670,13 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.fail(location, "`Deinit`is not allowed until deaggregation");
                 }
             }
-            StatementKind::Retag(_, _) => {
+            StatementKind::Retag(kind, _) => {
                 // FIXME(JakobDegen) The validator should check that `self.mir_phase <
                 // DropsLowered`. However, this causes ICEs with generation of drop shims, which
                 // seem to fail to set their `MirPhase` correctly.
+                if *kind == RetagKind::Raw || *kind == RetagKind::TwoPhase {
+                    self.fail(location, format!("explicit `{:?}` is forbidden", kind));
+                }
             }
             StatementKind::StorageLive(..)
             | StatementKind::StorageDead(..)
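The most mechanical change above is the pair of convenience wrappers `read_machine_usize`/`read_machine_isize` introduced in operand.rs and adopted at the call sites in intrinsics.rs and projection.rs. Below is a minimal standalone sketch of that wrapper pattern; `InterpCx`, `OpTy`, and `Scalar` are toy stand-ins here, not rustc's real types, and decoding is simplified to fixed-width integers.

```rust
// Toy stand-ins: `Scalar` decodes itself relative to a context, and the
// context wraps the two-step read-then-decode sequence into one call.
struct Scalar(u128);

impl Scalar {
    fn to_machine_usize(&self, _cx: &InterpCx) -> Result<u64, String> {
        // A real interpreter would decode at the target's pointer width.
        u64::try_from(self.0).map_err(|e| e.to_string())
    }
    fn to_machine_isize(&self, _cx: &InterpCx) -> Result<i64, String> {
        i64::try_from(self.0 as i128).map_err(|e| e.to_string())
    }
}

struct OpTy(u128);
struct InterpCx;

impl InterpCx {
    fn read_scalar(&self, op: &OpTy) -> Result<Scalar, String> {
        Ok(Scalar(op.0))
    }
    // The new wrappers: one call instead of `read_scalar(..)?.to_machine_usize(self)?`.
    fn read_machine_usize(&self, op: &OpTy) -> Result<u64, String> {
        self.read_scalar(op)?.to_machine_usize(self)
    }
    fn read_machine_isize(&self, op: &OpTy) -> Result<i64, String> {
        self.read_scalar(op)?.to_machine_isize(self)
    }
}

fn main() -> Result<(), String> {
    let cx = InterpCx;
    assert_eq!(cx.read_machine_usize(&OpTy(42))?, 42);
    assert_eq!(cx.read_machine_isize(&OpTy(7))?, 7);
    Ok(())
}
```

As the new comment in operand.rs says, pointer-sized reads are common and need layout access from the context, so a method on the context can hide the read-then-decode dance.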

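The terminator.rs hunk changes behavior rather than style: drop glue is now resolved before the place is evaluated, and dropping a type with no drop glue at all becomes a plain jump to `target`. A toy sketch of that control-flow shape follows; `DropGlue` and the string-returning evaluator are invented for illustration, and only the `resolve_drop_in_place` name echoes the real API.

```rust
// Toy model: resolving a drop yields either real glue or nothing; with no
// glue, the Drop terminator degenerates into a goto, so generic and
// non-generic code take the same path.
enum DropGlue {
    None,               // nothing to run (e.g. i32, &T)
    Real(&'static str), // stand-in for actual drop glue
}

fn resolve_drop_in_place(has_drop_glue: bool) -> DropGlue {
    if has_drop_glue { DropGlue::Real("drop_in_place") } else { DropGlue::None }
}

fn eval_drop_terminator(has_drop_glue: bool, target: usize) -> String {
    // Resolve *before* evaluating the place, as the new code does.
    match resolve_drop_in_place(has_drop_glue) {
        DropGlue::None => format!("goto bb{target}"),
        DropGlue::Real(glue) => format!("call {glue}, then goto bb{target}"),
    }
}

fn main() {
    assert_eq!(eval_drop_terminator(false, 3), "goto bb3");
    assert_eq!(eval_drop_terminator(true, 3), "call drop_in_place, then goto bb3");
}
```

Likewise, the validate.rs hunk adds a hard error for `Retag` kinds that should never appear explicitly in MIR. A stand-in validator with the same rule, assuming only that the `RetagKind` variant names match the real enum:

```rust
#[derive(Debug, PartialEq)]
enum RetagKind {
    FnEntry,
    TwoPhase,
    Raw,
    Default,
}

// Mirrors the new check: explicit `Raw` and `TwoPhase` retags are rejected;
// the phase check mentioned in the FIXME is still deferred.
fn check_retag(kind: &RetagKind) -> Result<(), String> {
    if *kind == RetagKind::Raw || *kind == RetagKind::TwoPhase {
        return Err(format!("explicit `{:?}` is forbidden", kind));
    }
    Ok(())
}

fn main() {
    assert!(check_retag(&RetagKind::Default).is_ok());
    assert!(check_retag(&RetagKind::FnEntry).is_ok());
    assert!(check_retag(&RetagKind::Raw).is_err());
    assert!(check_retag(&RetagKind::TwoPhase).is_err());
}
```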