Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--   compiler/rustc_const_eval/src/const_eval/machine.rs        6
-rw-r--r--   compiler/rustc_const_eval/src/errors.rs                    6
-rw-r--r--   compiler/rustc_const_eval/src/interpret/eval_context.rs    4
-rw-r--r--   compiler/rustc_const_eval/src/interpret/intrinsics.rs     76
-rw-r--r--   compiler/rustc_const_eval/src/interpret/machine.rs        25
-rw-r--r--   compiler/rustc_const_eval/src/interpret/memory.rs         36
-rw-r--r--   compiler/rustc_const_eval/src/interpret/mod.rs             1
-rw-r--r--   compiler/rustc_const_eval/src/interpret/projection.rs      6
-rw-r--r--   compiler/rustc_const_eval/src/interpret/util.rs           23
-rw-r--r--   compiler/rustc_const_eval/src/interpret/validity.rs       64
10 files changed, 177 insertions, 70 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 76fa744361a..52fc898192a 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -331,10 +331,10 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     fn load_mir(
         ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceKind<'tcx>,
-    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+    ) -> &'tcx mir::Body<'tcx> {
         match instance {
-            ty::InstanceKind::Item(def) => interp_ok(ecx.tcx.mir_for_ctfe(def)),
-            _ => interp_ok(ecx.tcx.instance_mir(instance)),
+            ty::InstanceKind::Item(def) => ecx.tcx.mir_for_ctfe(def),
+            _ => ecx.tcx.instance_mir(instance),
         }
     }
 
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 9133a5fc8ef..49cd7138748 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -475,6 +475,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             WriteToReadOnly(_) => const_eval_write_to_read_only,
             DerefFunctionPointer(_) => const_eval_deref_function_pointer,
             DerefVTablePointer(_) => const_eval_deref_vtable_pointer,
+            DerefTypeIdPointer(_) => const_eval_deref_typeid_pointer,
             InvalidBool(_) => const_eval_invalid_bool,
             InvalidChar(_) => const_eval_invalid_char,
             InvalidTag(_) => const_eval_invalid_tag,
@@ -588,7 +589,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
                 diag.arg("has", has.bytes());
                 diag.arg("msg", format!("{msg:?}"));
             }
-            WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => {
+            WriteToReadOnly(alloc)
+            | DerefFunctionPointer(alloc)
+            | DerefVTablePointer(alloc)
+            | DerefTypeIdPointer(alloc) => {
                 diag.arg("allocation", alloc);
             }
             InvalidBool(b) => {
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 46c784b41c6..41fc8d47cd3 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -96,7 +96,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// This inherent method takes priority over the trait method with the same name in LayoutOf,
     /// and allows wrapping the actual [LayoutOf::layout_of] with a tracing span.
     /// See [LayoutOf::layout_of] for the original documentation.
-    #[inline]
+    #[inline(always)]
     pub fn layout_of(
         &self,
         ty: Ty<'tcx>,
@@ -272,7 +272,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let def = instance.def_id();
             &self.tcx.promoted_mir(def)[promoted]
         } else {
-            M::load_mir(self, instance)?
+            M::load_mir(self, instance)
         };
         // do not continue if typeck errors occurred (can only occur in local crate)
         if let Some(err) = body.tainted_by_errors {
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 378ed6d0e10..1eba1f2f03c 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -4,7 +4,7 @@
 
 use std::assert_matches::assert_matches;
 
-use rustc_abi::Size;
+use rustc_abi::{FieldIdx, Size};
 use rustc_apfloat::ieee::{Double, Half, Quad, Single};
 use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
 use rustc_middle::ty::layout::TyAndLayout;
@@ -28,8 +28,35 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll
     let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
     tcx.mk_const_alloc(alloc)
 }
-
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+    /// Generates a value of `TypeId` for `ty` in-place.
+    pub(crate) fn write_type_id(
+        &mut self,
+        ty: Ty<'tcx>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ()> {
+        let tcx = self.tcx;
+        let type_id_hash = tcx.type_id_hash(ty).as_u128();
+        let op = self.const_val_to_op(
+            ConstValue::Scalar(Scalar::from_u128(type_id_hash)),
+            tcx.types.u128,
+            None,
+        )?;
+        self.copy_op_allow_transmute(&op, dest)?;
+
+        // Give the first pointer-size bytes provenance that knows about the type id.
+        // Here we rely on `TypeId` being a newtype around an array of pointers, so we
+        // first project to its only field and then the first array element.
+        let alloc_id = tcx.reserve_and_set_type_id_alloc(ty);
+        let first = self.project_field(dest, FieldIdx::ZERO)?;
+        let first = self.project_index(&first, 0)?;
+        let offset = self.read_scalar(&first)?.to_target_usize(&tcx)?;
+        let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(offset));
+        let ptr = self.global_root_pointer(ptr)?;
+        let val = Scalar::from_pointer(ptr, &tcx);
+        self.write_scalar(val, &first)
+    }
+
     /// Returns `true` if emulation happened.
     /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
     /// intrinsic handling.
@@ -63,9 +90,48 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             sym::type_id => {
                 let tp_ty = instance.args.type_at(0);
                 ensure_monomorphic_enough(tcx, tp_ty)?;
-                let val = ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128());
-                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
-                self.copy_op(&val, dest)?;
+                self.write_type_id(tp_ty, dest)?;
+            }
+            sym::type_id_eq => {
+                // Both operands are `TypeId`, which is a newtype around an array of pointers.
+                // Project until we have the array elements.
+                let a_fields = self.project_field(&args[0], FieldIdx::ZERO)?;
+                let b_fields = self.project_field(&args[1], FieldIdx::ZERO)?;
+
+                let mut a_fields = self.project_array_fields(&a_fields)?;
+                let mut b_fields = self.project_array_fields(&b_fields)?;
+
+                let (_idx, a) = a_fields
+                    .next(self)?
+                    .expect("we know the layout of TypeId has at least 2 array elements");
+                let a = self.deref_pointer(&a)?;
+                let (a, offset_a) = self.get_ptr_type_id(a.ptr())?;
+
+                let (_idx, b) = b_fields
+                    .next(self)?
+                    .expect("we know the layout of TypeId has at least 2 array elements");
+                let b = self.deref_pointer(&b)?;
+                let (b, offset_b) = self.get_ptr_type_id(b.ptr())?;
+
+                let provenance_matches = a == b;
+
+                let mut eq_id = offset_a == offset_b;
+
+                while let Some((_, a)) = a_fields.next(self)? {
+                    let (_, b) = b_fields.next(self)?.unwrap();
+
+                    let a = self.read_target_usize(&a)?;
+                    let b = self.read_target_usize(&b)?;
+                    eq_id &= a == b;
+                }
+
+                if !eq_id && provenance_matches {
+                    throw_ub_format!(
+                        "type_id_eq: one of the TypeId arguments is invalid, the hash does not match the type it represents"
+                    )
+                }
+
+                self.write_scalar(Scalar::from_bool(provenance_matches), dest)?;
             }
             sym::variant_count => {
                 let tp_ty = instance.args.type_at(0);
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 35ec303f961..d150ed69250 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -18,8 +18,8 @@ use rustc_target::callconv::FnAbi;
 
 use super::{
     AllocBytes, AllocId, AllocKind, AllocRange, Allocation, CTFE_ALLOC_SALT, ConstAllocation,
-    CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind,
-    Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
+    CtfeProvenance, EnteredTraceSpan, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy,
+    MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
 };
 
 /// Data returned by [`Machine::after_stack_pop`], and consumed by
@@ -147,12 +147,6 @@ pub trait Machine<'tcx>: Sized {
     /// already been checked before.
     const ALL_CONSTS_ARE_PRECHECKED: bool = true;
 
-    /// Determines whether rustc_const_eval functions that make use of the [Machine] should make
-    /// tracing calls (to the `tracing` library). By default this is `false`, meaning the tracing
-    /// calls will supposedly be optimized out. This flag is set to `true` inside Miri, to allow
-    /// tracing the interpretation steps, among other things.
-    const TRACING_ENABLED: bool = false;
-
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool;
 
@@ -189,8 +183,8 @@ pub trait Machine<'tcx>: Sized {
     fn load_mir(
         ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceKind<'tcx>,
-    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
-        interp_ok(ecx.tcx.instance_mir(instance))
+    ) -> &'tcx mir::Body<'tcx> {
+        ecx.tcx.instance_mir(instance)
     }
 
     /// Entry point to all function calls.
@@ -634,6 +628,17 @@ pub trait Machine<'tcx>: Sized {
     /// Compute the value passed to the constructors of the `AllocBytes` type for
     /// abstract machine allocations.
     fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams;
+
+    /// Allows enabling/disabling tracing calls from within `rustc_const_eval` at compile time, by
+    /// delegating the entering of [tracing::Span]s to implementors of the [Machine] trait. The
+    /// default implementation corresponds to tracing being disabled, meaning the tracing calls will
+    /// supposedly be optimized out completely. To enable tracing, override this trait method and
+    /// return `span.entered()`. Also see [crate::enter_trace_span].
+    #[must_use]
+    #[inline(always)]
+    fn enter_trace_span(_span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
+        ()
+    }
 }
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index c97d53a45de..6414821e21d 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -15,9 +15,9 @@ use std::{fmt, ptr};
 use rustc_abi::{Align, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
-use rustc_middle::bug;
 use rustc_middle::mir::display_allocation;
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, throw_ub_format};
 use tracing::{debug, instrument, trace};
 
 use super::{
@@ -346,6 +346,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     kind = "vtable",
                 )
             }
+            Some(GlobalAlloc::TypeId { .. }) => {
+                err_ub_custom!(
+                    fluent::const_eval_invalid_dealloc,
+                    alloc_id = alloc_id,
+                    kind = "typeid",
+                )
+            }
             Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                 err_ub_custom!(
                     fluent::const_eval_invalid_dealloc,
@@ -615,6 +622,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             }
             Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
             Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
+            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
             None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
             Some(GlobalAlloc::Static(def_id)) => {
                 assert!(self.tcx.is_static(def_id));
@@ -896,7 +904,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
         let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
         let kind = match global_alloc {
-            GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
+            GlobalAlloc::TypeId { .. }
+            | GlobalAlloc::Static { .. }
+            | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
             GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
             GlobalAlloc::VTable { .. } => AllocKind::VTable,
         };
@@ -936,6 +946,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         }
     }
 
+    /// Takes a pointer that is the first chunk of a `TypeId` and return the type that its
+    /// provenance refers to, as well as the segment of the hash that this pointer covers.
+    pub fn get_ptr_type_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Ty<'tcx>, Size)> {
+        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
+        let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
+            throw_ub_format!("type_id_eq: `TypeId` provenance is not a type id")
+        };
+        interp_ok((ty, offset))
+    }
+
     pub fn get_ptr_fn(
         &self,
         ptr: Pointer<Option<M::Provenance>>,
@@ -1197,6 +1220,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
                 Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                     write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                 }
+                Some(GlobalAlloc::TypeId { ty }) => {
+                    write!(fmt, " (typeid for {ty})")?;
+                }
                 Some(GlobalAlloc::Static(did)) => {
                     write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                 }
@@ -1233,7 +1259,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
     /// `offset` is relative to this allocation reference, not the base of the allocation.
     pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
-        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
     }
 
     /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
@@ -1285,7 +1311,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
     /// `offset` is relative to this allocation reference, not the base of the allocation.
     pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
         self.read_scalar(
-            alloc_range(offset, self.tcx.data_layout().pointer_size),
+            alloc_range(offset, self.tcx.data_layout().pointer_size()),
             /*read_provenance*/ true,
         )
     }
@@ -1472,7 +1498,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             dest_alloc
                 .write_uninit(&tcx, dest_range)
                 .map_err(|e| e.to_interp_error(dest_alloc_id))?;
-            // We can forget about the provenance, this is all not initialized anyway.
+            // `write_uninit` also resets the provenance, so we are done.
             return interp_ok(());
         }
 
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 8303f891f98..2fc372dd019 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -37,6 +37,7 @@ pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 use self::place::{MemPlace, Place};
 pub use self::projection::{OffsetMode, Projectable};
 pub use self::stack::{Frame, FrameInfo, LocalState, ReturnContinuation, StackPopInfo};
+pub use self::util::EnteredTraceSpan;
 pub(crate) use self::util::create_static_alloc;
 pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking};
 pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 306697d4ec9..f72c4418081 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -296,7 +296,11 @@ where
         base: &'a P,
     ) -> InterpResult<'tcx, ArrayIterator<'a, 'tcx, M::Provenance, P>> {
         let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
-            span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+            span_bug!(
+                self.cur_span(),
+                "project_array_fields: expected an array layout, got {:#?}",
+                base.layout()
+            );
         };
         let len = base.len(self)?;
         let field_layout = base.layout().field(self, 0);
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 99add01f95c..72650d545c3 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -46,21 +46,20 @@ pub(crate) fn create_static_alloc<'tcx>(
     interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
 }
 
-/// This struct is needed to enforce `#[must_use]` on [tracing::span::EnteredSpan]
-/// while wrapping them in an `Option`.
-#[must_use]
-pub enum MaybeEnteredSpan {
-    Some(tracing::span::EnteredSpan),
-    None,
-}
+/// A marker trait returned by [crate::interpret::Machine::enter_trace_span], identifying either a
+/// real [tracing::span::EnteredSpan] in case tracing is enabled, or the dummy type `()` when
+/// tracing is disabled.
+pub trait EnteredTraceSpan {}
+impl EnteredTraceSpan for () {}
+impl EnteredTraceSpan for tracing::span::EnteredSpan {}
 
+/// Shortand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span].
+/// This is supposed to be compiled out when [crate::interpret::Machine::enter_trace_span] has the
+/// default implementation (i.e. when it does not actually enter the span but instead returns `()`).
+/// Note: the result of this macro **must be used** because the span is exited when it's dropped.
 #[macro_export]
 macro_rules! enter_trace_span {
     ($machine:ident, $($tt:tt)*) => {
-        if $machine::TRACING_ENABLED {
-            $crate::interpret::util::MaybeEnteredSpan::Some(tracing::info_span!($($tt)*).entered())
-        } else {
-            $crate::interpret::util::MaybeEnteredSpan::None
-        }
+        $machine::enter_trace_span(|| tracing::info_span!($($tt)*))
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index fc4d13af8c4..fc44490c96d 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -571,40 +571,42 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 let alloc_actual_mutbl =
                     global_alloc.mutability(*self.ecx.tcx, self.ecx.typing_env);
 
-                if let GlobalAlloc::Static(did) = global_alloc {
-                    let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
-                        bug!()
-                    };
-                    // Special handling for pointers to statics (irrespective of their type).
-                    assert!(!self.ecx.tcx.is_thread_local_static(did));
-                    assert!(self.ecx.tcx.is_static(did));
-                    // Mode-specific checks
-                    match ctfe_mode {
-                        CtfeValidationMode::Static { .. }
-                        | CtfeValidationMode::Promoted { .. } => {
-                            // We skip recursively checking other statics. These statics must be sound by
-                            // themselves, and the only way to get broken statics here is by using
-                            // unsafe code.
-                            // The reasons we don't check other statics is twofold. For one, in all
-                            // sound cases, the static was already validated on its own, and second, we
-                            // trigger cycle errors if we try to compute the value of the other static
-                            // and that static refers back to us (potentially through a promoted).
-                            // This could miss some UB, but that's fine.
-                            // We still walk nested allocations, as they are fundamentally part of this validation run.
-                            // This means we will also recurse into nested statics of *other*
-                            // statics, even though we do not recurse into other statics directly.
-                            // That's somewhat inconsistent but harmless.
-                            skip_recursive_check = !nested;
-                        }
-                        CtfeValidationMode::Const { .. } => {
-                            // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
-                            // just get errors trying to read the value.
-                            if alloc_actual_mutbl.is_mut() || self.ecx.tcx.is_foreign_item(did)
-                            {
-                                skip_recursive_check = true;
+                match global_alloc {
+                    GlobalAlloc::Static(did) => {
+                        let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
+                            bug!()
+                        };
+                        assert!(!self.ecx.tcx.is_thread_local_static(did));
+                        assert!(self.ecx.tcx.is_static(did));
+                        match ctfe_mode {
+                            CtfeValidationMode::Static { .. }
+                            | CtfeValidationMode::Promoted { .. } => {
+                                // We skip recursively checking other statics. These statics must be sound by
+                                // themselves, and the only way to get broken statics here is by using
+                                // unsafe code.
+                                // The reasons we don't check other statics is twofold. For one, in all
+                                // sound cases, the static was already validated on its own, and second, we
+                                // trigger cycle errors if we try to compute the value of the other static
+                                // and that static refers back to us (potentially through a promoted).
+                                // This could miss some UB, but that's fine.
+                                // We still walk nested allocations, as they are fundamentally part of this validation run.
+                                // This means we will also recurse into nested statics of *other*
+                                // statics, even though we do not recurse into other statics directly.
+                                // That's somewhat inconsistent but harmless.
+                                skip_recursive_check = !nested;
+                            }
+                            CtfeValidationMode::Const { .. } => {
+                                // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
+                                // just get errors trying to read the value.
+                                if alloc_actual_mutbl.is_mut()
+                                    || self.ecx.tcx.is_foreign_item(did)
+                                {
+                                    skip_recursive_check = true;
+                                }
                             }
                         }
                     }
+                    _ => (),
                 }
 
                 // If this allocation has size zero, there is no actual mutability here.
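
Note on the TypeId representation: the interpreter changes above rely on `TypeId` being a newtype over an array of pointer-sized words. `write_type_id` transmutes the 128-bit `type_id_hash` into that array and then gives the first word provenance pointing at a per-type `GlobalAlloc::TypeId` allocation; `type_id_eq` compares that provenance and treats the numeric words only as a consistency check. The following standalone sketch is not part of this commit; the hash constant and the 64-bit, native-endianness assumptions are illustrative only.

    // Illustration only: splitting a 128-bit type-id hash into pointer-sized
    // chunks on a 64-bit target (native endianness). The constant is made up;
    // the real value comes from `tcx.type_id_hash(ty)`.
    fn main() {
        let hash: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
        let bytes = hash.to_ne_bytes(); // 16 bytes = 2 pointer-sized chunks on 64-bit
        let first = usize::from_ne_bytes(bytes[..8].try_into().unwrap());
        let second = usize::from_ne_bytes(bytes[8..].try_into().unwrap());
        // In the interpreter, the chunk corresponding to `first` additionally carries
        // provenance for the `GlobalAlloc::TypeId` allocation of the type; a hash
        // mismatch combined with matching provenance is reported as UB by `type_id_eq`.
        println!("{first:#018x} {second:#018x}");
    }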

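Note on the tracing hook: the machine.rs and util.rs changes replace the `TRACING_ENABLED` constant with a `Machine::enter_trace_span` method plus the `EnteredTraceSpan` marker trait, so a machine such as Miri can enable tracing by returning `span().entered()` while the default stays a no-op. Below is a standalone sketch of the same pattern outside rustc, with made-up names (`Tracer`, `Quiet`, `Verbose`) and assuming the `tracing` and `tracing-subscriber` crates; it is not the rustc code itself.

    use tracing::info_span;

    // Either a real entered span, or `()` when tracing is compiled out.
    trait EnteredTraceSpan {}
    impl EnteredTraceSpan for () {}
    impl EnteredTraceSpan for tracing::span::EnteredSpan {}

    trait Tracer {
        // Default: tracing disabled; the closure is never called, so the span is
        // never even constructed and the whole call can be optimized away.
        #[must_use]
        fn enter_trace_span(_span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
            ()
        }
    }

    struct Quiet;
    impl Tracer for Quiet {} // keeps the no-op default

    struct Verbose;
    impl Tracer for Verbose {
        fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
            span().entered() // actually build and enter the span
        }
    }

    fn do_work<T: Tracer>() {
        let _guard = T::enter_trace_span(|| info_span!("do_work"));
        // ... the span (if any) stays entered until `_guard` is dropped ...
    }

    fn main() {
        tracing_subscriber::fmt::init(); // needs the `tracing-subscriber` crate
        do_work::<Quiet>(); // no span is created
        do_work::<Verbose>(); // a "do_work" span is entered and exited here
    }

Implementations that do not override the hook keep the zero-cost default, which is what the `enter_trace_span!` macro relies on to compile out.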