diff options
| author | Dylan MacKenzie <ecstaticmorse@gmail.com> | 2018-06-21 17:31:09 -0700 |
|---|---|---|
| committer | Dylan MacKenzie <ecstaticmorse@gmail.com> | 2018-07-04 14:36:07 -0700 |
| commit | db025c14eccb435ffb6bc5d4f834b4551589447b (patch) | |
| tree | 1dbd33e4a01c30d9b8b50aa1805a65c418a51817 | |
| parent | 0ad8f9e5b1950e4ac09950def4231c8d5875de57 (diff) | |
| download | rust-db025c14eccb435ffb6bc5d4f834b4551589447b.tar.gz rust-db025c14eccb435ffb6bc5d4f834b4551589447b.zip | |
Refactor EvalContext stack and heap into inner struct
Change the surrounding code to use accessor methods to refer to these fields. Similar changes have not yet been made in tools/miri.
| -rw-r--r-- | src/librustc_mir/interpret/cast.rs | 8 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/const_eval.rs | 10 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/eval_context.rs | 138 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/memory.rs | 4 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/place.rs | 4 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/step.rs | 10 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/terminator/mod.rs | 8 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/traits.rs | 26 |
8 files changed, 110 insertions, 98 deletions
diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index e69e7a522ab..4bf40091153 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -76,8 +76,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // No alignment check needed for raw pointers. But we have to truncate to target ptr size. TyRawPtr(_) => { Ok(Scalar::Bits { - bits: self.memory.truncate_to_ptr(v).0 as u128, - defined: self.memory.pointer_size().bits() as u8, + bits: self.memory().truncate_to_ptr(v).0 as u128, + defined: self.memory().pointer_size().bits() as u8, }) }, @@ -92,7 +92,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match dest_ty.sty { // float -> uint TyUint(t) => { - let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); + let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize); match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_u128(width).value, @@ -106,7 +106,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }, // float -> int TyInt(t) => { - let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); + let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize); match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_i128(width).value as u128, diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index ea09bab5d14..fe2c46aa48f 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -88,7 +88,7 @@ pub fn value_to_const_value<'tcx>( Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)), Value::ByRef(ptr, align) => { let ptr = ptr.to_ptr().unwrap(); - let alloc = ecx.memory.get(ptr.alloc_id)?; + let alloc = ecx.memory().get(ptr.alloc_id)?; assert!(alloc.align.abi() >= 
align.abi()); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes()); let mut alloc = alloc.clone(); @@ -149,7 +149,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( } let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); - let ptr = ecx.memory.allocate( + let ptr = ecx.memory_mut().allocate( layout.size, layout.align, MemoryKind::Stack, @@ -486,7 +486,7 @@ pub fn const_variant_index<'a, 'tcx>( let (ptr, align) = match value { Value::ScalarPair(..) | Value::Scalar(_) => { let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); + let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?; (ptr, layout.align) }, @@ -515,9 +515,9 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>( ()); let value = ecx.const_to_value(val.val)?; let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?; + let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?; ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?; - let alloc = ecx.memory.get(ptr.alloc_id)?; + let alloc = ecx.memory().get(ptr.alloc_id)?; Ok(tcx.intern_const_alloc(alloc.clone())) }; result().expect("unable to convert ConstValue to Allocation") diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 031c75013a2..fe95b9319f3 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -32,11 +32,8 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Bounds in scope for polymorphic evaluations. pub param_env: ty::ParamEnv<'tcx>, - /// The virtual memory system. - pub memory: Memory<'a, 'mir, 'tcx, M>, - - /// The virtual call stack. 
- pub(crate) stack: Vec<Frame<'mir, 'tcx>>, + /// Virtual memory and call stack. + state: EvalState<'a, 'mir, 'tcx, M>, /// The maximum number of stack frames allowed pub(crate) stack_limit: usize, @@ -47,6 +44,14 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub(crate) terminators_remaining: usize, } +struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { + /// The virtual memory system. + memory: Memory<'a, 'mir, 'tcx, M>, + + /// The virtual call stack. + stack: Vec<Frame<'mir, 'tcx>>, +} + /// A stack frame. pub struct Frame<'mir, 'tcx: 'mir> { //////////////////////////////////////////////////////////////////////////////// @@ -186,18 +191,20 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M machine, tcx, param_env, - memory: Memory::new(tcx, memory_data), - stack: Vec::new(), + state: EvalState { + memory: Memory::new(tcx, memory_data), + stack: Vec::new(), + }, stack_limit: tcx.sess.const_eval_stack_frame_limit, terminators_remaining: MAX_TERMINATORS, } } pub(crate) fn with_fresh_body<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R { - let stack = mem::replace(&mut self.stack, Vec::new()); + let stack = mem::replace(self.stack_mut(), Vec::new()); let terminators_remaining = mem::replace(&mut self.terminators_remaining, MAX_TERMINATORS); let r = f(self); - self.stack = stack; + *self.stack_mut() = stack; self.terminators_remaining = terminators_remaining; r } @@ -206,29 +213,34 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); - self.memory.allocate(layout.size, layout.align, MemoryKind::Stack) + self.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack) } pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - &self.memory + &self.state.memory } pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - &mut 
self.memory + &mut self.state.memory } + #[inline] pub fn stack(&self) -> &[Frame<'mir, 'tcx>] { - &self.stack + &self.state.stack + } + + pub fn stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx>> { + &mut self.state.stack } #[inline] pub fn cur_frame(&self) -> usize { - assert!(self.stack.len() > 0); - self.stack.len() - 1 + assert!(self.stack().len() > 0); + self.stack().len() - 1 } pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { - let ptr = self.memory.allocate_bytes(s.as_bytes()); + let ptr = self.memory_mut().allocate_bytes(s.as_bytes()); Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx)) } @@ -246,7 +258,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } ConstValue::ByRef(alloc, offset) => { // FIXME: Allocate new AllocId for all constants inside - let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; + let id = self.memory_mut().allocate_value(alloc.clone(), MemoryKind::Stack)?; Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align)) }, ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)), @@ -417,7 +429,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M IndexVec::new() }; - self.stack.push(Frame { + self.stack_mut().push(Frame { mir, block: mir::START_BLOCK, return_to_block, @@ -428,9 +440,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M stmt: 0, }); - self.memory.cur_frame = self.cur_frame(); + self.memory_mut().cur_frame = self.cur_frame(); - if self.stack.len() > self.stack_limit { + if self.stack().len() > self.stack_limit { err!(StackFrameLimitReached) } else { Ok(()) @@ -440,18 +452,18 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { ::log_settings::settings().indentation -= 1; M::end_region(self, None)?; - let frame = self.stack.pop().expect( + let frame = 
self.stack_mut().pop().expect( "tried to pop a stack frame, but there were none", ); - if !self.stack.is_empty() { + if !self.stack().is_empty() { // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame? - self.memory.cur_frame = self.cur_frame(); + self.memory_mut().cur_frame = self.cur_frame(); } match frame.return_to_block { StackPopCleanup::MarkStatic(mutable) => { if let Place::Ptr { ptr, .. } = frame.return_place { // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions - self.memory.mark_static_initialized( + self.memory_mut().mark_static_initialized( ptr.to_ptr()?.alloc_id, mutable, )? @@ -474,8 +486,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let Some(Value::ByRef(ptr, _align)) = local { trace!("deallocating local"); let ptr = ptr.to_ptr()?; - self.memory.dump_alloc(ptr.alloc_id); - self.memory.deallocate_local(ptr)?; + self.memory().dump_alloc(ptr.alloc_id); + self.memory_mut().deallocate_local(ptr)?; }; Ok(()) } @@ -595,7 +607,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let src = self.eval_place(place)?; let ty = self.place_ty(place); let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx); - let defined = self.memory.pointer_size().bits() as u8; + let defined = self.memory().pointer_size().bits() as u8; self.write_scalar( dest, Scalar::Bits { @@ -637,7 +649,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "SizeOf nullary MIR operator called for unsized type"); - let defined = self.memory.pointer_size().bits() as u8; + let defined = self.memory().pointer_size().bits() as u8; self.write_scalar( dest, Scalar::Bits { @@ -732,7 +744,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M def_id, substs, ).ok_or_else(|| 
EvalErrorKind::TooGeneric.into()); - let fn_ptr = self.memory.create_fn_alloc(instance?); + let fn_ptr = self.memory_mut().create_fn_alloc(instance?); let valty = ValTy { value: Value::Scalar(fn_ptr.into()), ty: dest_ty, @@ -768,7 +780,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M substs, ty::ClosureKind::FnOnce, ); - let fn_ptr = self.memory.create_fn_alloc(instance); + let fn_ptr = self.memory_mut().create_fn_alloc(instance); let valty = ValTy { value: Value::Scalar(fn_ptr.into()), ty: dest_ty, @@ -1045,7 +1057,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { let new_place = match place { Place::Local { frame, local } => { - match self.stack[frame].locals[local] { + match self.stack()[frame].locals[local] { None => return err!(DeadLocal), Some(Value::ByRef(ptr, align)) => { Place::Ptr { @@ -1055,11 +1067,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } Some(val) => { - let ty = self.stack[frame].mir.local_decls[local].ty; - let ty = self.monomorphize(ty, self.stack[frame].instance.substs); + let ty = self.stack()[frame].mir.local_decls[local].ty; + let ty = self.monomorphize(ty, self.stack()[frame].instance.substs); let layout = self.layout_of(ty)?; let ptr = self.alloc_ptr(ty)?; - self.stack[frame].locals[local] = + self.stack_mut()[frame].locals[local] = Some(Value::ByRef(ptr.into(), layout.align)); // it stays live let place = Place::from_ptr(ptr, layout.align); self.write_value(ValTy { value: val, ty }, place)?; @@ -1141,10 +1153,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Place::Local { frame, local } => { - let dest = self.stack[frame].get_local(local)?; + let dest = self.stack()[frame].get_local(local)?; self.write_value_possibly_by_val( src_val, - |this, val| this.stack[frame].set_local(local, val), + |this, val| 
this.stack_mut()[frame].set_local(local, val), dest, dest_ty, ) @@ -1186,7 +1198,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } else { let dest_ptr = self.alloc_ptr(dest_ty)?.into(); let layout = self.layout_of(dest_ty)?; - self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; + self.memory_mut().copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; write_dest(self, Value::ByRef(dest_ptr, layout.align))?; } } else { @@ -1208,7 +1220,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout); match value { Value::ByRef(ptr, align) => { - self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) + self.memory_mut().copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) } Value::Scalar(scalar) => { let signed = match layout.abi { @@ -1221,7 +1233,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout), } }; - self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed) + self.memory_mut().write_scalar(dest, dest_align, scalar, layout.size, signed) } Value::ScalarPair(a_val, b_val) => { trace!("write_value_to_ptr valpair: {:#?}", layout); @@ -1234,8 +1246,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let b_offset = a_size.abi_align(b.align(&self)); let b_ptr = dest.ptr_offset(b_offset, &self)?.into(); // TODO: What about signedess? 
- self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, false)?; - self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, false) + self.memory_mut().write_scalar(a_ptr, dest_align, a_val, a_size, false)?; + self.memory_mut().write_scalar(b_ptr, dest_align, b_val, b_size, false) } } } @@ -1266,8 +1278,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ptr_align: Align, pointee_ty: Ty<'tcx>, ) -> EvalResult<'tcx, Value> { - let ptr_size = self.memory.pointer_size(); - let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into(); + let ptr_size = self.memory().pointer_size(); + let p: Scalar = self.memory().read_ptr_sized(ptr, ptr_align)?.into(); if self.type_is_sized(pointee_ty) { Ok(p.to_value()) } else { @@ -1275,11 +1287,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let extra = ptr.offset(ptr_size, self)?; match self.tcx.struct_tail(pointee_ty).sty { ty::TyDynamic(..) => Ok(p.to_value_with_vtable( - self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?, + self.memory().read_ptr_sized(extra, ptr_align)?.to_ptr()?, )), ty::TySlice(..) | ty::TyStr => { let len = self - .memory + .memory() .read_ptr_sized(extra, ptr_align)? .to_bits(ptr_size)?; Ok(p.to_value_with_len(len as u64, self.tcx.tcx)) @@ -1297,10 +1309,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ) -> EvalResult<'tcx> { match ty.sty { ty::TyBool => { - self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; + self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; } ty::TyChar => { - let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; + let c = self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; match ::std::char::from_u32(c) { Some(..) 
=> (), None => return err!(InvalidChar(c as u128)), @@ -1308,7 +1320,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } ty::TyFnPtr(_) => { - self.memory.read_ptr_sized(ptr, ptr_align)?; + self.memory().read_ptr_sized(ptr, ptr_align)?; }, ty::TyRef(_, rty, _) | ty::TyRawPtr(ty::TypeAndMut { ty: rty, .. }) => { @@ -1323,7 +1335,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi { let size = scalar.value.size(self); - self.memory.read_scalar(ptr, ptr_align, size)?; + self.memory().read_scalar(ptr, ptr_align, size)?; } } @@ -1344,7 +1356,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> { let layout = self.layout_of(ty)?; - self.memory.check_align(ptr, ptr_align)?; + self.memory().check_align(ptr, ptr_align)?; if layout.size.bytes() == 0 { return Ok(Some(Value::Scalar(Scalar::undef()))); @@ -1357,7 +1369,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match layout.abi { layout::Abi::Scalar(..) 
=> { - let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?; + let scalar = self.memory().read_scalar(ptr, ptr_align, layout.size)?; Ok(Some(Value::Scalar(scalar))) } layout::Abi::ScalarPair(ref a, ref b) => { @@ -1366,8 +1378,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let a_ptr = ptr; let b_offset = a_size.abi_align(b.align(self)); let b_ptr = ptr.offset(b_offset, self)?.into(); - let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; - let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; + let a_val = self.memory().read_scalar(a_ptr, ptr_align, a_size)?; + let b_val = self.memory().read_scalar(b_ptr, ptr_align, b_size)?; Ok(Some(Value::ScalarPair(a_val, b_val))) } _ => Ok(None), @@ -1375,11 +1387,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn frame(&self) -> &Frame<'mir, 'tcx> { - self.stack.last().expect("no call frames exist") + self.stack().last().expect("no call frames exist") } pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> { - self.stack.last_mut().expect("no call frames exist") + self.stack_mut().last_mut().expect("no call frames exist") } pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> { @@ -1387,7 +1399,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn substs(&self) -> &'tcx Substs<'tcx> { - if let Some(frame) = self.stack.last() { + if let Some(frame) = self.stack().last() { frame.instance.substs } else { Substs::empty() @@ -1533,7 +1545,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } write!(msg, ":").unwrap(); - match self.stack[frame].get_local(local) { + match self.stack()[frame].get_local(local) { Err(err) => { if let EvalErrorKind::DeadLocal = err.kind { write!(msg, " is dead").unwrap(); @@ -1568,13 +1580,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } trace!("{}", msg); - 
self.memory.dump_allocs(allocs); + self.memory().dump_allocs(allocs); } Place::Ptr { ptr, align, .. } => { match ptr { Scalar::Ptr(ptr) => { trace!("by align({}) ref:", align.abi()); - self.memory.dump_alloc(ptr.alloc_id); + self.memory().dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), } @@ -1587,12 +1599,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, { - let val = self.stack[frame].get_local(local)?; + let val = self.stack()[frame].get_local(local)?; let new_val = f(self, val)?; - self.stack[frame].set_local(local, new_val)?; + self.stack_mut()[frame].set_local(local, new_val)?; // FIXME(solson): Run this when setting to Undef? (See previous version of this code.) - // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) { - // self.memory.deallocate(ptr)?; + // if let Value::ByRef(ptr) = self.stack()[frame].get_local(local) { + // self.memory().deallocate(ptr)?; // } Ok(()) } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 5cf734cce8a..4d1188b0f15 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -991,12 +991,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Me impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - &mut self.memory + self.memory_mut() } #[inline] fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - &self.memory + self.memory() } } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 51b33fa54b2..95391bc8963 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -201,7 +201,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { assert_eq!(extra, 
PlaceExtra::None); Ok(Value::ByRef(ptr, align)) } - Place::Local { frame, local } => self.stack[frame].get_local(local), + Place::Local { frame, local } => self.stack()[frame].get_local(local), } } @@ -261,7 +261,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (base_ptr, base_align, base_extra) = match base { Place::Ptr { ptr, align, extra } => (ptr, align, extra), Place::Local { frame, local } => { - match (&self.stack[frame].get_local(local)?, &base_layout.abi) { + match (&self.stack()[frame].get_local(local)?, &base_layout.abi) { // in case the field covers the entire type, just return the value (&Value::Scalar(_), &layout::Abi::Scalar(_)) | (&Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index ab15278219f..86eb02312bd 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -19,7 +19,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { /// Returns true as long as there are more things to do. pub fn step(&mut self) -> EvalResult<'tcx, bool> { - if self.stack.is_empty() { + if self.stack().is_empty() { return Ok(false); } @@ -53,7 +53,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // *before* executing the statement. let frame_idx = self.cur_frame(); self.tcx.span = stmt.source_info.span; - self.memory.tcx.span = stmt.source_info.span; + self.memory_mut().tcx.span = stmt.source_info.span; match stmt.kind { Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?, @@ -102,16 +102,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { InlineAsm { .. 
} => return err!(InlineAsm), } - self.stack[frame_idx].stmt += 1; + self.stack_mut()[frame_idx].stmt += 1; Ok(()) } fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { trace!("{:?}", terminator.kind); self.tcx.span = terminator.source_info.span; - self.memory.tcx.span = terminator.source_info.span; + self.memory_mut().tcx.span = terminator.source_info.span; self.eval_terminator(terminator)?; - if !self.stack.is_empty() { + if !self.stack().is_empty() { trace!("// {:?}", self.frame().block); } Ok(()) diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index 2994b1b387f..61670769d01 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -71,7 +71,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (fn_def, sig) = match func.ty.sty { ty::TyFnPtr(sig) => { let fn_ptr = self.value_to_scalar(func)?.to_ptr()?; - let instance = self.memory.get_fn(fn_ptr)?; + let instance = self.memory().get_fn(fn_ptr)?; let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { ty::TyFnDef(..) 
=> { @@ -377,14 +377,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { - let ptr_size = self.memory.pointer_size(); + let ptr_size = self.memory().pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?; - let fn_ptr = self.memory.read_ptr_sized( + let fn_ptr = self.memory().read_ptr_sized( vtable.offset(ptr_size * (idx as u64 + 3), &self)?, ptr_align )?.to_ptr()?; - let instance = self.memory.get_fn(fn_ptr)?; + let instance = self.memory().get_fn(fn_ptr)?; let mut args = args.to_vec(); let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty; args[0].ty = ty; diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index b6c7feda19f..2a9129498ad 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -25,26 +25,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let size = layout.size.bytes(); let align = layout.align.abi(); - let ptr_size = self.memory.pointer_size(); + let ptr_size = self.memory().pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; let methods = self.tcx.vtable_methods(trait_ref); - let vtable = self.memory.allocate( + let vtable = self.memory_mut().allocate( ptr_size * (3 + methods.len() as u64), ptr_align, MemoryKind::Stack, )?; let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); - let drop = self.memory.create_fn_alloc(drop); - self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; + let drop = self.memory_mut().create_fn_alloc(drop); + self.memory_mut().write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; let size_ptr = vtable.offset(ptr_size, &self)?; - self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { + 
self.memory_mut().write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { bits: size as u128, defined: ptr_size.bits() as u8, })?; let align_ptr = vtable.offset(ptr_size * 2, &self)?; - self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { + self.memory_mut().write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { bits: align as u128, defined: ptr_size.bits() as u8, })?; @@ -52,13 +52,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { let instance = self.resolve(def_id, substs)?; - let fn_ptr = self.memory.create_fn_alloc(instance); + let fn_ptr = self.memory_mut().create_fn_alloc(instance); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; + self.memory_mut().write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; } } - self.memory.mark_static_initialized( + self.memory_mut().mark_static_initialized( vtable.alloc_id, Mutability::Immutable, )?; @@ -76,7 +76,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? 
{ // some values don't need to call a drop impl, so the value is null Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None), - Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), + Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory().get_fn(drop_fn).map(Some), _ => err!(ReadBytesAsPointer), } } @@ -85,10 +85,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &self, vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { - let pointer_size = self.memory.pointer_size(); + let pointer_size = self.memory().pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; - let align = self.memory.read_ptr_sized( + let size = self.memory().read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; + let align = self.memory().read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align )?.to_bits(pointer_size)? as u64; |
