Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--   compiler/rustc_mir_transform/src/dataflow_const_prop.rs | 45
-rw-r--r--   compiler/rustc_mir_transform/src/gvn.rs                 | 84
-rw-r--r--   compiler/rustc_mir_transform/src/jump_threading.rs      | 39
-rw-r--r--   compiler/rustc_mir_transform/src/known_panics_lint.rs   | 43
4 files changed, 114 insertions, 97 deletions
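The change applied throughout these files follows one pattern: fallible interpreter calls on `self.ecx` now return the dedicated `InterpResult` type rather than a plain `Result`, so call sites swap `.ok()` for `.discard_err()` to obtain an `Option`, and functions returning `InterpResult` wrap their success value with `interp_ok(..)` instead of `Ok(..)`. A minimal, self-contained sketch of that shape, using hypothetical stand-in types rather than the real rustc internals:

```rust
// Hypothetical stand-in for rustc's InterpResult type; the real definition differs.
struct InterpResult<T>(Result<T, String>);

impl<T> InterpResult<T> {
    // Stand-in for the `discard_err()` used in the diff: drop the error, keep the value.
    fn discard_err(self) -> Option<T> {
        self.0.ok()
    }
}

// Stand-in for the `interp_ok(..)` constructor used in the diff.
fn interp_ok<T>(value: T) -> InterpResult<T> {
    InterpResult(Ok(value))
}

// A fallible "interpreter" operation, standing in for calls such as
// `self.ecx.read_discriminant(..)` in the real code.
fn read_discriminant(raw: &str) -> InterpResult<u32> {
    match raw.parse::<u32>() {
        Ok(v) => interp_ok(v),
        Err(e) => InterpResult(Err(e.to_string())),
    }
}

fn main() {
    // Old pattern (plain `Result`): read_discriminant("3").ok()
    // New pattern (`InterpResult`): convert to `Option` with `discard_err()`.
    assert_eq!(read_discriminant("3").discard_err(), Some(3));
    assert_eq!(read_discriminant("oops").discard_err(), None);
}
```

The old `.ok()` call sites map one-for-one onto `.discard_err()`, which is why the diff below is almost entirely mechanical.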
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 88dc8e74a8c..002216f50f2 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -3,7 +3,9 @@ //! Currently, this pass only propagates scalar values. use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str}; -use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable}; +use rustc_const_eval::interpret::{ + ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok, +}; use rustc_data_structures::fx::FxHashMap; use rustc_hir::def::DefKind; use rustc_middle::bug; @@ -236,6 +238,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { FlatSet::Elem(op) => self .ecx .int_to_int_or_float(&op, layout) + .discard_err() .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)), FlatSet::Bottom => FlatSet::Bottom, FlatSet::Top => FlatSet::Top, @@ -249,6 +252,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { FlatSet::Elem(op) => self .ecx .float_to_float_or_int(&op, layout) + .discard_err() .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)), FlatSet::Bottom => FlatSet::Bottom, FlatSet::Top => FlatSet::Top, @@ -271,6 +275,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { FlatSet::Elem(value) => self .ecx .unary_op(*op, &value) + .discard_err() .map_or(FlatSet::Top, |val| self.wrap_immediate(*val)), FlatSet::Bottom => FlatSet::Bottom, FlatSet::Top => FlatSet::Top, @@ -364,8 +369,8 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } } Operand::Constant(box constant) => { - if let Ok(constant) = - self.ecx.eval_mir_constant(&constant.const_, constant.span, None) + if let Some(constant) = + self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err() { self.assign_constant(state, place, constant, &[]); } @@ -387,7 +392,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { for &(mut proj_elem) in projection { if let PlaceElem::Index(index) = proj_elem { if let FlatSet::Elem(index) = state.get(index.into(), &self.map) - && let Ok(offset) = index.to_target_usize(&self.tcx) + && let Some(offset) = index.to_target_usize(&self.tcx).discard_err() && let Some(min_length) = offset.checked_add(1) { proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false }; @@ -395,7 +400,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { return; } } - operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) { + operand = if let Some(operand) = self.ecx.project(&operand, proj_elem).discard_err() { operand } else { return; @@ -406,24 +411,24 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { place, operand, &mut |elem, op| match elem { - TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(), - TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(), + TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(), + TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(), TrackElem::Discriminant => { - let variant = self.ecx.read_discriminant(op).ok()?; + let variant = self.ecx.read_discriminant(op).discard_err()?; let discr_value = - self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?; + self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?; Some(discr_value.into()) } TrackElem::DerefLen => { - let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into(); - let len_usize = op.len(&self.ecx).ok()?; + 
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into(); + let len_usize = op.len(&self.ecx).discard_err()?; let layout = self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).unwrap(); Some(ImmTy::from_uint(len_usize, layout).into()) } }, &mut |place, op| { - if let Ok(imm) = self.ecx.read_immediate_raw(op) + if let Some(imm) = self.ecx.read_immediate_raw(op).discard_err() && let Some(imm) = imm.right() { let elem = self.wrap_immediate(*imm); @@ -447,11 +452,11 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { (FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom), // Both sides are known, do the actual computation. (FlatSet::Elem(left), FlatSet::Elem(right)) => { - match self.ecx.binary_op(op, &left, &right) { + match self.ecx.binary_op(op, &left, &right).discard_err() { // Ideally this would return an Immediate, since it's sometimes // a pair and sometimes not. But as a hack we always return a pair // and just make the 2nd component `Bottom` when it does not exist. - Ok(val) => { + Some(val) => { if matches!(val.layout.abi, Abi::ScalarPair(..)) { let (val, overflow) = val.to_scalar_pair(); (FlatSet::Elem(val), FlatSet::Elem(overflow)) @@ -470,7 +475,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } let arg_scalar = const_arg.to_scalar(); - let Ok(arg_value) = arg_scalar.to_bits(layout.size) else { + let Some(arg_value) = arg_scalar.to_bits(layout.size).discard_err() else { return (FlatSet::Top, FlatSet::Top); }; @@ -519,7 +524,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } let enum_ty_layout = self.tcx.layout_of(self.param_env.and(enum_ty)).ok()?; let discr_value = - self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).ok()?; + self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).discard_err()?; Some(discr_value.to_scalar()) } @@ -595,7 +600,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> { .intern_with_temp_alloc(layout, |ecx, dest| { try_write_constant(ecx, dest, place, ty, state, map) }) - .ok()?; + .discard_err()?; return Some(Const::Val(ConstValue::Indirect { alloc_id, offset: Size::ZERO }, ty)); } @@ -632,7 +637,7 @@ fn try_write_constant<'tcx>( // Fast path for ZSTs. if layout.is_zst() { - return Ok(()); + return interp_ok(()); } // Fast path for scalars. @@ -717,7 +722,7 @@ fn try_write_constant<'tcx>( ty::Error(_) | ty::Infer(..) | ty::CoroutineWitness(..) => bug!(), } - Ok(()) + interp_ok(()) } impl<'mir, 'tcx> @@ -830,7 +835,7 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> { if let PlaceElem::Index(local) = elem { let offset = self.before_effect.get(&(location, local.into()))?; let offset = offset.try_to_scalar()?; - let offset = offset.to_target_usize(&self.tcx).ok()?; + let offset = offset.to_target_usize(&self.tcx).discard_err()?; let min_length = offset.checked_add(1)?; Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false }) } else { diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index f735d08fca5..50c9702cb9b 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -393,7 +393,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { Repeat(..) => return None, Constant { ref value, disambiguator: _ } => { - self.ecx.eval_mir_constant(value, DUMMY_SP, None).ok()? + self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()? 
} Aggregate(kind, variant, ref fields) => { let fields = fields @@ -419,29 +419,32 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ImmTy::uninit(ty).into() } else if matches!(kind, AggregateTy::RawPtr { .. }) { // Pointers don't have fields, so don't `project_field` them. - let data = self.ecx.read_pointer(fields[0]).ok()?; + let data = self.ecx.read_pointer(fields[0]).discard_err()?; let meta = if fields[1].layout.is_zst() { MemPlaceMeta::None } else { - MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?) + MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?) }; let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx); ImmTy::from_immediate(ptr_imm, ty).into() } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { - let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?; + let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?; let variant_dest = if let Some(variant) = variant { - self.ecx.project_downcast(&dest, variant).ok()? + self.ecx.project_downcast(&dest, variant).discard_err()? } else { dest.clone() }; for (field_index, op) in fields.into_iter().enumerate() { - let field_dest = self.ecx.project_field(&variant_dest, field_index).ok()?; - self.ecx.copy_op(op, &field_dest).ok()?; + let field_dest = + self.ecx.project_field(&variant_dest, field_index).discard_err()?; + self.ecx.copy_op(op, &field_dest).discard_err()?; } - self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?; + self.ecx + .write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest) + .discard_err()?; self.ecx .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id()) - .ok()?; + .discard_err()?; dest.into() } else { return None; @@ -467,7 +470,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // This should have been replaced by a `ConstantIndex` earlier. ProjectionElem::Index(_) => return None, }; - self.ecx.project(value, elem).ok()? + self.ecx.project(value, elem).discard_err()? } Address { place, kind, provenance: _ } => { if !place.is_indirect_first_projection() { @@ -475,14 +478,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } let local = self.locals[place.local]?; let pointer = self.evaluated[local].as_ref()?; - let mut mplace = self.ecx.deref_pointer(pointer).ok()?; + let mut mplace = self.ecx.deref_pointer(pointer).discard_err()?; for proj in place.projection.iter().skip(1) { // We have no call stack to associate a local with a value, so we cannot // interpret indexing. 
if matches!(proj, ProjectionElem::Index(_)) { return None; } - mplace = self.ecx.project(&mplace, proj).ok()?; + mplace = self.ecx.project(&mplace, proj).discard_err()?; } let pointer = mplace.to_ref(&self.ecx); let ty = match kind { @@ -500,15 +503,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { Discriminant(base) => { let base = self.evaluated[base].as_ref()?; - let variant = self.ecx.read_discriminant(base).ok()?; + let variant = self.ecx.read_discriminant(base).discard_err()?; let discr_value = - self.ecx.discriminant_for_variant(base.layout.ty, variant).ok()?; + self.ecx.discriminant_for_variant(base.layout.ty, variant).discard_err()?; discr_value.into() } Len(slice) => { let slice = self.evaluated[slice].as_ref()?; let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); - let len = slice.len(&self.ecx).ok()?; + let len = slice.len(&self.ecx).discard_err()?; let imm = ImmTy::from_uint(len, usize_layout); imm.into() } @@ -535,31 +538,31 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } UnaryOp(un_op, operand) => { let operand = self.evaluated[operand].as_ref()?; - let operand = self.ecx.read_immediate(operand).ok()?; - let val = self.ecx.unary_op(un_op, &operand).ok()?; + let operand = self.ecx.read_immediate(operand).discard_err()?; + let val = self.ecx.unary_op(un_op, &operand).discard_err()?; val.into() } BinaryOp(bin_op, lhs, rhs) => { let lhs = self.evaluated[lhs].as_ref()?; - let lhs = self.ecx.read_immediate(lhs).ok()?; + let lhs = self.ecx.read_immediate(lhs).discard_err()?; let rhs = self.evaluated[rhs].as_ref()?; - let rhs = self.ecx.read_immediate(rhs).ok()?; - let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?; + let rhs = self.ecx.read_immediate(rhs).discard_err()?; + let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?; val.into() } Cast { kind, value, from: _, to } => match kind { CastKind::IntToInt | CastKind::IntToFloat => { let value = self.evaluated[value].as_ref()?; - let value = self.ecx.read_immediate(value).ok()?; + let value = self.ecx.read_immediate(value).discard_err()?; let to = self.ecx.layout_of(to).ok()?; - let res = self.ecx.int_to_int_or_float(&value, to).ok()?; + let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?; res.into() } CastKind::FloatToFloat | CastKind::FloatToInt => { let value = self.evaluated[value].as_ref()?; - let value = self.ecx.read_immediate(value).ok()?; + let value = self.ecx.read_immediate(value).discard_err()?; let to = self.ecx.layout_of(to).ok()?; - let res = self.ecx.float_to_float_or_int(&value, to).ok()?; + let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?; res.into() } CastKind::Transmute => { @@ -574,28 +577,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { _ => return None, } } - value.offset(Size::ZERO, to, &self.ecx).ok()? + value.offset(Size::ZERO, to, &self.ecx).discard_err()? 
} CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => { let src = self.evaluated[value].as_ref()?; let to = self.ecx.layout_of(to).ok()?; - let dest = self.ecx.allocate(to, MemoryKind::Stack).ok()?; - self.ecx.unsize_into(src, to, &dest.clone().into()).ok()?; + let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_err()?; + self.ecx.unsize_into(src, to, &dest.clone().into()).discard_err()?; self.ecx .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id()) - .ok()?; + .discard_err()?; dest.into() } CastKind::FnPtrToPtr | CastKind::PtrToPtr => { let src = self.evaluated[value].as_ref()?; - let src = self.ecx.read_immediate(src).ok()?; + let src = self.ecx.read_immediate(src).discard_err()?; let to = self.ecx.layout_of(to).ok()?; - let ret = self.ecx.ptr_to_ptr(&src, to).ok()?; + let ret = self.ecx.ptr_to_ptr(&src, to).discard_err()?; ret.into() } CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => { let src = self.evaluated[value].as_ref()?; - let src = self.ecx.read_immediate(src).ok()?; + let src = self.ecx.read_immediate(src).discard_err()?; let to = self.ecx.layout_of(to).ok()?; ImmTy::from_immediate(*src, to).into() } @@ -708,7 +711,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { && let Some(idx) = self.locals[idx_local] { if let Some(offset) = self.evaluated[idx].as_ref() - && let Ok(offset) = self.ecx.read_target_usize(offset) + && let Some(offset) = self.ecx.read_target_usize(offset).discard_err() && let Some(min_length) = offset.checked_add(1) { projection.to_mut()[i] = @@ -868,7 +871,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { && let DefKind::Enum = self.tcx.def_kind(enum_did) { let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args); - let discr = self.ecx.discriminant_for_variant(enum_ty, variant).ok()?; + let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?; return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty)); } @@ -1223,8 +1226,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let as_bits = |value| { let constant = self.evaluated[value].as_ref()?; if layout.abi.is_scalar() { - let scalar = self.ecx.read_scalar(constant).ok()?; - scalar.to_bits(constant.layout.size).ok() + let scalar = self.ecx.read_scalar(constant).discard_err()?; + scalar.to_bits(constant.layout.size).discard_err() } else { // `constant` is a wide pointer. Do not evaluate to bits. None @@ -1484,7 +1487,7 @@ fn op_to_prop_const<'tcx>( // If this constant has scalar ABI, return it as a `ConstValue::Scalar`. if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi - && let Ok(scalar) = ecx.read_scalar(op) + && let Some(scalar) = ecx.read_scalar(op).discard_err() { if !scalar.try_to_scalar_int().is_ok() { // Check that we do not leak a pointer. @@ -1498,12 +1501,12 @@ fn op_to_prop_const<'tcx>( // If this constant is already represented as an `Allocation`, // try putting it into global memory to return it. if let Either::Left(mplace) = op.as_mplace_or_imm() { - let (size, _align) = ecx.size_and_align_of_mplace(&mplace).ok()??; + let (size, _align) = ecx.size_and_align_of_mplace(&mplace).discard_err()??; // Do not try interning a value that contains provenance. // Due to https://github.com/rust-lang/rust/issues/79738, doing so could lead to bugs. // FIXME: remove this hack once that issue is fixed. 
- let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).ok()??; + let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).discard_err()??; if alloc_ref.has_provenance() { return None; } @@ -1511,7 +1514,7 @@ fn op_to_prop_const<'tcx>( let pointer = mplace.ptr().into_pointer_or_addr().ok()?; let (prov, offset) = pointer.into_parts(); let alloc_id = prov.alloc_id(); - intern_const_alloc_for_constprop(ecx, alloc_id).ok()?; + intern_const_alloc_for_constprop(ecx, alloc_id).discard_err()?; // `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything // by `GlobalAlloc::Memory`, so do fall through to copying if needed. @@ -1526,7 +1529,8 @@ fn op_to_prop_const<'tcx>( } // Everything failed: create a new allocation to hold the data. - let alloc_id = ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).ok()?; + let alloc_id = + ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).discard_err()?; let value = ConstValue::Indirect { alloc_id, offset: Size::ZERO }; // Check that we do not leak a pointer. diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs index 91fbc91e1e7..1844b97887a 100644 --- a/compiler/rustc_mir_transform/src/jump_threading.rs +++ b/compiler/rustc_mir_transform/src/jump_threading.rs @@ -200,7 +200,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { debug!(?discr, ?bb); let discr_ty = discr.ty(self.body, self.tcx).ty; - let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return }; + let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { + return; + }; let Some(discr) = self.map.find(discr.as_ref()) else { return }; debug!(?discr); @@ -388,24 +390,24 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { lhs, constant, &mut |elem, op| match elem { - TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(), - TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(), + TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(), + TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(), TrackElem::Discriminant => { - let variant = self.ecx.read_discriminant(op).ok()?; + let variant = self.ecx.read_discriminant(op).discard_err()?; let discr_value = - self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?; + self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?; Some(discr_value.into()) } TrackElem::DerefLen => { - let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into(); - let len_usize = op.len(&self.ecx).ok()?; + let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into(); + let len_usize = op.len(&self.ecx).discard_err()?; let layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); Some(ImmTy::from_uint(len_usize, layout).into()) } }, &mut |place, op| { if let Some(conditions) = state.try_get_idx(place, &self.map) - && let Ok(imm) = self.ecx.read_immediate_raw(op) + && let Some(imm) = self.ecx.read_immediate_raw(op).discard_err() && let Some(imm) = imm.right() && let Immediate::Scalar(Scalar::Int(int)) = *imm { @@ -429,8 +431,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { match rhs { // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`. 
Operand::Constant(constant) => { - let Ok(constant) = - self.ecx.eval_mir_constant(&constant.const_, constant.span, None) + let Some(constant) = + self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err() else { return; }; @@ -469,8 +471,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { AggregateKind::Adt(.., Some(_)) => return, AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => { if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant) - && let Ok(discr_value) = - self.ecx.discriminant_for_variant(agg_ty, *variant_index) + && let Some(discr_value) = self + .ecx + .discriminant_for_variant(agg_ty, *variant_index) + .discard_err() { self.process_immediate(bb, discr_target, discr_value, state); } @@ -555,7 +559,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { // `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant // of a niche encoding. If we cannot ensure that we write to the discriminant, do // nothing. - let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return }; + let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { + return; + }; let writes_discriminant = match enum_layout.variants { Variants::Single { index } => { assert_eq!(index, *variant_index); @@ -568,7 +574,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { } => *variant_index != untagged_variant, }; if writes_discriminant { - let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index) + let Some(discr) = + self.ecx.discriminant_for_variant(enum_ty, *variant_index).discard_err() else { return; }; @@ -645,7 +652,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> { let Some(discr) = discr.place() else { return }; let discr_ty = discr.ty(self.body, self.tcx).ty; - let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return }; + let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { + return; + }; let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return }; if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) { diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index 783e7aabe85..ccc029b1e28 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -6,7 +6,7 @@ use std::fmt::Debug; use rustc_const_eval::const_eval::DummyMachine; use rustc_const_eval::interpret::{ - ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, + ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok, }; use rustc_data_structures::fx::FxHashSet; use rustc_hir::HirId; @@ -101,7 +101,7 @@ impl<'tcx> Value<'tcx> { } (PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => { let idx = prop.get_const(idx.into())?.immediate()?; - let idx = prop.ecx.read_target_usize(idx).ok()?.try_into().ok()?; + let idx = prop.ecx.read_target_usize(idx).discard_err()?.try_into().ok()?; if idx <= FieldIdx::MAX_AS_U32 { fields.get(FieldIdx::from_u32(idx)).unwrap_or(&Value::Uninit) } else { @@ -231,21 +231,20 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { where F: FnOnce(&mut Self) -> InterpResult<'tcx, T>, { - match f(self) { - Ok(val) => Some(val), - Err(error) => { - trace!("InterpCx operation failed: {:?}", error); + f(self) + .map_err(|err| { + trace!("InterpCx operation failed: {:?}", err); // Some errors shouldn't come up because creating them causes // an allocation, which we should avoid. 
When that happens, // dedicated error variants should be introduced instead. assert!( - !error.kind().formatted_string(), + !err.kind().formatted_string(), "known panics lint encountered formatting error: {}", - format_interp_error(self.ecx.tcx.dcx(), error), + format_interp_error(self.ecx.tcx.dcx(), err), ); - None - } - } + err + }) + .discard_err() } /// Returns the value, if any, of evaluating `c`. @@ -315,7 +314,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { .ecx .binary_op(BinOp::SubWithOverflow, &ImmTy::from_int(0, arg.layout), &arg)? .to_scalar_pair(); - Ok((arg, overflow.to_bool()?)) + interp_ok((arg, overflow.to_bool()?)) })?; if overflow { self.report_assert_as_lint( @@ -349,7 +348,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let left_ty = left.ty(self.local_decls(), self.tcx); let left_size = self.ecx.layout_of(left_ty).ok()?.size; let right_size = r.layout.size; - let r_bits = r.to_scalar().to_bits(right_size).ok(); + let r_bits = r.to_scalar().to_bits(right_size).discard_err(); if r_bits.is_some_and(|b| b >= left_size.bits() as u128) { debug!("check_binary_op: reporting assert for {:?}", location); let panic = AssertKind::Overflow( @@ -496,7 +495,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { // This can be `None` if the lhs wasn't const propagated and we just // triggered the assert on the value of the rhs. self.eval_operand(op) - .and_then(|op| self.ecx.read_immediate(&op).ok()) + .and_then(|op| self.ecx.read_immediate(&op).discard_err()) .map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int())) }; let msg = match msg { @@ -602,7 +601,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { Len(place) => { let len = match self.get_const(place)? { - Value::Immediate(src) => src.len(&self.ecx).ok()?, + Value::Immediate(src) => src.len(&self.ecx).discard_err()?, Value::Aggregate { fields, .. } => fields.len() as u64, Value::Uninit => match place.ty(self.local_decls(), self.tcx).ty.kind() { ty::Array(_, n) => n.try_eval_target_usize(self.tcx, self.param_env)?, @@ -615,7 +614,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { Ref(..) | RawPtr(..) 
=> return None, NullaryOp(ref null_op, ty) => { - let op_layout = self.use_ecx(|this| this.ecx.layout_of(ty))?; + let op_layout = self.ecx.layout_of(ty).ok()?; let val = match null_op { NullOp::SizeOf => op_layout.size.bytes(), NullOp::AlignOf => op_layout.align.abi.bytes(), @@ -633,16 +632,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { Cast(ref kind, ref value, to) => match kind { CastKind::IntToInt | CastKind::IntToFloat => { let value = self.eval_operand(value)?; - let value = self.ecx.read_immediate(&value).ok()?; + let value = self.ecx.read_immediate(&value).discard_err()?; let to = self.ecx.layout_of(to).ok()?; - let res = self.ecx.int_to_int_or_float(&value, to).ok()?; + let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?; res.into() } CastKind::FloatToFloat | CastKind::FloatToInt => { let value = self.eval_operand(value)?; - let value = self.ecx.read_immediate(&value).ok()?; + let value = self.ecx.read_immediate(&value).discard_err()?; let to = self.ecx.layout_of(to).ok()?; - let res = self.ecx.float_to_float_or_int(&value, to).ok()?; + let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?; res.into() } CastKind::Transmute => { @@ -656,7 +655,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { _ => return None, } - value.offset(Size::ZERO, to, &self.ecx).ok()?.into() + value.offset(Size::ZERO, to, &self.ecx).discard_err()?.into() } _ => return None, }, @@ -781,7 +780,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> { TerminatorKind::SwitchInt { ref discr, ref targets } => { if let Some(ref value) = self.eval_operand(discr) && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value)) - && let Ok(constant) = value_const.to_bits(value_const.size()) + && let Some(constant) = value_const.to_bits(value_const.size()).discard_err() { // We managed to evaluate the discriminant, so we know we only need to visit // one target. |