Diffstat (limited to 'compiler/rustc_mir_transform/src')
19 files changed, 418 insertions, 319 deletions
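Several of the diffs below adapt to `Call` terminator arguments now being a boxed slice (`Box<[Spanned<Operand<'tcx>>]>`) rather than a `Vec`, destructuring them through a new `take_array` helper added in the `lib.rs` hunk and used in `lower_intrinsics.rs` and `instsimplify.rs`. As orientation, here is a standalone sketch of that helper outside the compiler; the `main`, the `u32` element type, and the argument values are illustrative only and not part of the diff.

// The helper as added to rustc_mir_transform's lib.rs in this diff: move a boxed
// slice out of a `&mut` and try to view it as a fixed-size array of the expected arity.
fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> {
    // `Box<[T]>` -> `Box<[T; N]>` only succeeds when the lengths match;
    // on failure the original boxed slice is handed back as the error.
    let b: Box<[T; N]> = std::mem::take(b).try_into()?;
    Ok(*b)
}

// Illustrative usage (hypothetical, not from the diff): callers pattern-match on
// the expected arity instead of draining or popping from a Vec.
fn main() {
    let mut args: Box<[u32]> = vec![10, 20].into();
    match take_array(&mut args) {
        Ok([lhs, rhs]) => println!("lhs = {lhs}, rhs = {rhs}"),
        Err(rest) => println!("unexpected number of arguments: {}", rest.len()),
    }
}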
diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs index 48a6a83e146..264d8a13996 100644 --- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs +++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs @@ -18,7 +18,8 @@ use crate::MirPass; use rustc_middle::mir::coverage::CoverageKind; -use rustc_middle::mir::{Body, BorrowKind, Rvalue, StatementKind, TerminatorKind}; +use rustc_middle::mir::{Body, BorrowKind, CastKind, Rvalue, StatementKind, TerminatorKind}; +use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::TyCtxt; pub struct CleanupPostBorrowck; @@ -36,6 +37,22 @@ impl<'tcx> MirPass<'tcx> for CleanupPostBorrowck { CoverageKind::BlockMarker { .. } | CoverageKind::SpanMarker { .. }, ) | StatementKind::FakeRead(..) => statement.make_nop(), + StatementKind::Assign(box ( + _, + Rvalue::Cast( + ref mut cast_kind @ CastKind::PointerCoercion( + PointerCoercion::ArrayToPointer + | PointerCoercion::MutToConstPointer, + ), + .., + ), + )) => { + // BorrowCk needed to track whether these cases were coercions or casts, + // to know whether to check lifetimes in their pointees, + // but from now on that distinction doesn't matter, + // so just make them ordinary pointer casts instead. + *cast_kind = CastKind::PtrToPtr; + } _ => (), } } diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index bf79b4e133a..05674792426 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -677,8 +677,8 @@ fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { fn eliminate_get_context_call<'tcx>(bb_data: &mut BasicBlockData<'tcx>) -> Local { let terminator = bb_data.terminator.take().unwrap(); - if let TerminatorKind::Call { mut args, destination, target, .. } = terminator.kind { - let arg = args.pop().unwrap(); + if let TerminatorKind::Call { args, destination, target, .. } = terminator.kind { + let [arg] = *Box::try_from(args).unwrap(); let local = arg.node.place().unwrap().local; let arg = Rvalue::Use(arg.node); diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs index 2c692c95003..7e401b5482f 100644 --- a/compiler/rustc_mir_transform/src/cost_checker.rs +++ b/compiler/rustc_mir_transform/src/cost_checker.rs @@ -1,3 +1,4 @@ +use rustc_middle::bug; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt}; @@ -6,13 +7,16 @@ const INSTR_COST: usize = 5; const CALL_PENALTY: usize = 25; const LANDINGPAD_PENALTY: usize = 50; const RESUME_PENALTY: usize = 45; +const LARGE_SWITCH_PENALTY: usize = 20; +const CONST_SWITCH_BONUS: usize = 10; /// Verify that the callee body is compatible with the caller. 
#[derive(Clone)] pub(crate) struct CostChecker<'b, 'tcx> { tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, - cost: usize, + penalty: usize, + bonus: usize, callee_body: &'b Body<'tcx>, instance: Option<ty::Instance<'tcx>>, } @@ -24,11 +28,11 @@ impl<'b, 'tcx> CostChecker<'b, 'tcx> { instance: Option<ty::Instance<'tcx>>, callee_body: &'b Body<'tcx>, ) -> CostChecker<'b, 'tcx> { - CostChecker { tcx, param_env, callee_body, instance, cost: 0 } + CostChecker { tcx, param_env, callee_body, instance, penalty: 0, bonus: 0 } } pub fn cost(&self) -> usize { - self.cost + usize::saturating_sub(self.penalty, self.bonus) } fn instantiate_ty(&self, v: Ty<'tcx>) -> Ty<'tcx> { @@ -41,36 +45,57 @@ impl<'b, 'tcx> CostChecker<'b, 'tcx> { } impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> { - fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) { - // Don't count StorageLive/StorageDead in the inlining cost. + fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) { + // Most costs are in rvalues and terminators, not in statements. match statement.kind { - StatementKind::StorageLive(_) - | StatementKind::StorageDead(_) - | StatementKind::Deinit(_) - | StatementKind::Nop => {} - _ => self.cost += INSTR_COST, + StatementKind::Intrinsic(ref ndi) => { + self.penalty += match **ndi { + NonDivergingIntrinsic::Assume(..) => INSTR_COST, + NonDivergingIntrinsic::CopyNonOverlapping(..) => CALL_PENALTY, + }; + } + _ => self.super_statement(statement, location), + } + } + + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, _location: Location) { + match rvalue { + Rvalue::NullaryOp(NullOp::UbChecks, ..) + if !self + .tcx + .sess + .opts + .unstable_opts + .inline_mir_preserve_debug + .unwrap_or(self.tcx.sess.ub_checks()) => + { + // If this is in optimized MIR it's because it's used later, + // so if we don't need UB checks this session, give a bonus + // here to offset the cost of the call later. + self.bonus += CALL_PENALTY; + } + // These are essentially constants that didn't end up in an Operand, + // so treat them as also being free. + Rvalue::NullaryOp(..) => {} + _ => self.penalty += INSTR_COST, } } fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) { - let tcx = self.tcx; - match terminator.kind { - TerminatorKind::Drop { ref place, unwind, .. } => { + match &terminator.kind { + TerminatorKind::Drop { place, unwind, .. } => { // If the place doesn't actually need dropping, treat it like a regular goto. - let ty = self.instantiate_ty(place.ty(self.callee_body, tcx).ty); - if ty.needs_drop(tcx, self.param_env) { - self.cost += CALL_PENALTY; + let ty = self.instantiate_ty(place.ty(self.callee_body, self.tcx).ty); + if ty.needs_drop(self.tcx, self.param_env) { + self.penalty += CALL_PENALTY; if let UnwindAction::Cleanup(_) = unwind { - self.cost += LANDINGPAD_PENALTY; + self.penalty += LANDINGPAD_PENALTY; } - } else { - self.cost += INSTR_COST; } } - TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => { - let fn_ty = self.instantiate_ty(f.const_.ty()); - self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() - && tcx.intrinsic(def_id).is_some() + TerminatorKind::Call { func, unwind, .. 
} => { + self.penalty += if let Some((def_id, ..)) = func.const_fn_def() + && self.tcx.intrinsic(def_id).is_some() { // Don't give intrinsics the extra penalty for calls INSTR_COST @@ -78,23 +103,57 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> { CALL_PENALTY }; if let UnwindAction::Cleanup(_) = unwind { - self.cost += LANDINGPAD_PENALTY; + self.penalty += LANDINGPAD_PENALTY; } } - TerminatorKind::Assert { unwind, .. } => { - self.cost += CALL_PENALTY; + TerminatorKind::SwitchInt { discr, targets } => { + if discr.constant().is_some() { + // Not only will this become a `Goto`, but likely other + // things will be removable as unreachable. + self.bonus += CONST_SWITCH_BONUS; + } else if targets.all_targets().len() > 3 { + // More than false/true/unreachable gets extra cost. + self.penalty += LARGE_SWITCH_PENALTY; + } else { + self.penalty += INSTR_COST; + } + } + TerminatorKind::Assert { unwind, msg, .. } => { + self.penalty += if msg.is_optional_overflow_check() + && !self + .tcx + .sess + .opts + .unstable_opts + .inline_mir_preserve_debug + .unwrap_or(self.tcx.sess.overflow_checks()) + { + INSTR_COST + } else { + CALL_PENALTY + }; if let UnwindAction::Cleanup(_) = unwind { - self.cost += LANDINGPAD_PENALTY; + self.penalty += LANDINGPAD_PENALTY; } } - TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY, + TerminatorKind::UnwindResume => self.penalty += RESUME_PENALTY, TerminatorKind::InlineAsm { unwind, .. } => { - self.cost += INSTR_COST; + self.penalty += INSTR_COST; if let UnwindAction::Cleanup(_) = unwind { - self.cost += LANDINGPAD_PENALTY; + self.penalty += LANDINGPAD_PENALTY; } } - _ => self.cost += INSTR_COST, + TerminatorKind::Unreachable => { + self.bonus += INSTR_COST; + } + TerminatorKind::Goto { .. } | TerminatorKind::Return => {} + TerminatorKind::UnwindTerminate(..) => {} + kind @ (TerminatorKind::FalseUnwind { .. } + | TerminatorKind::FalseEdge { .. } + | TerminatorKind::Yield { .. } + | TerminatorKind::CoroutineDrop) => { + bug!("{kind:?} should not be in runtime MIR"); + } } } } diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs index 25744009be8..1fce2abbbbf 100644 --- a/compiler/rustc_mir_transform/src/coverage/query.rs +++ b/compiler/rustc_mir_transform/src/coverage/query.rs @@ -6,11 +6,13 @@ use rustc_middle::query::TyCtxtAt; use rustc_middle::ty::{self, TyCtxt}; use rustc_middle::util::Providers; use rustc_span::def_id::LocalDefId; +use rustc_span::sym; /// Registers query/hook implementations related to coverage. pub(crate) fn provide(providers: &mut Providers) { providers.hooks.is_eligible_for_coverage = |TyCtxtAt { tcx, .. }, def_id| is_eligible_for_coverage(tcx, def_id); + providers.queries.coverage_attr_on = coverage_attr_on; providers.queries.coverage_ids_info = coverage_ids_info; } @@ -38,7 +40,12 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { return false; } - if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_COVERAGE) { + if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NAKED) { + trace!("InstrumentCoverage skipped for {def_id:?} (`#[naked]`)"); + return false; + } + + if !tcx.coverage_attr_on(def_id) { trace!("InstrumentCoverage skipped for {def_id:?} (`#[coverage(off)]`)"); return false; } @@ -46,6 +53,30 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { true } +/// Query implementation for `coverage_attr_on`. 
+fn coverage_attr_on(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { + // Check for annotations directly on this def. + if let Some(attr) = tcx.get_attr(def_id, sym::coverage) { + match attr.meta_item_list().as_deref() { + Some([item]) if item.has_name(sym::off) => return false, + Some([item]) if item.has_name(sym::on) => return true, + Some(_) | None => { + // Other possibilities should have been rejected by `rustc_parse::validate_attr`. + tcx.dcx().span_bug(attr.span, "unexpected value of coverage attribute"); + } + } + } + + match tcx.opt_local_parent(def_id) { + // Check the parent def (and so on recursively) until we find an + // enclosing attribute or reach the crate root. + Some(parent) => tcx.coverage_attr_on(parent), + // We reached the crate root without seeing a coverage attribute, so + // allow coverage instrumentation by default. + None => true, + } +} + /// Query implementation for `coverage_ids_info`. fn coverage_ids_info<'tcx>( tcx: TyCtxt<'tcx>, diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs index 048547dc9f5..63a9f303b85 100644 --- a/compiler/rustc_mir_transform/src/coverage/tests.rs +++ b/compiler/rustc_mir_transform/src/coverage/tests.rs @@ -134,7 +134,7 @@ impl<'tcx> MockBlocks<'tcx> { some_from_block, TerminatorKind::Call { func: Operand::Copy(self.dummy_place.clone()), - args: vec![], + args: [].into(), destination: self.dummy_place.clone(), target: Some(TEMP_BLOCK), unwind: UnwindAction::Continue, diff --git a/compiler/rustc_mir_transform/src/ctfe_limit.rs b/compiler/rustc_mir_transform/src/ctfe_limit.rs index dcc960e1e02..a0dddec185c 100644 --- a/compiler/rustc_mir_transform/src/ctfe_limit.rs +++ b/compiler/rustc_mir_transform/src/ctfe_limit.rs @@ -1,5 +1,6 @@ //! A pass that inserts the `ConstEvalCounter` instruction into any blocks that have a back edge //! (thus indicating there is a loop in the CFG), or whose terminator is a function call. + use crate::MirPass; use rustc_data_structures::graph::dominators::Dominators; diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 0fd85eb345d..8b965f4d18e 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -181,11 +181,6 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { state.insert_value_idx(value_target, val, self.map()); } if let Some(overflow_target) = overflow_target { - let overflow = match overflow { - FlatSet::Top => FlatSet::Top, - FlatSet::Elem(overflow) => FlatSet::Elem(overflow), - FlatSet::Bottom => FlatSet::Bottom, - }; // We have flooded `target` earlier. state.insert_value_idx(overflow_target, overflow, self.map()); } diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs index 5e3cd853675..0cb304da80a 100644 --- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs +++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs @@ -11,38 +11,6 @@ use rustc_target::spec::PanicStrategy; use crate::errors; -/// Some of the functions declared as "may unwind" by `fn_can_unwind` can't actually unwind. In -/// particular, `extern "C"` is still considered as can-unwind on stable, but we need to consider -/// it cannot-unwind here. So below we check `fn_can_unwind() && abi_can_unwind()` before concluding -/// that a function call can unwind. 
-fn abi_can_unwind(abi: Abi) -> bool { - use Abi::*; - match abi { - C { unwind } - | System { unwind } - | Cdecl { unwind } - | Stdcall { unwind } - | Fastcall { unwind } - | Vectorcall { unwind } - | Thiscall { unwind } - | Aapcs { unwind } - | Win64 { unwind } - | SysV64 { unwind } => unwind, - PtxKernel - | Msp430Interrupt - | X86Interrupt - | EfiApi - | AvrInterrupt - | AvrNonBlockingInterrupt - | RiscvInterruptM - | RiscvInterruptS - | CCmseNonSecureCall - | Wasm - | Unadjusted => false, - RustIntrinsic | Rust | RustCall | RustCold => unreachable!(), // these ABIs are already skipped earlier - } -} - // Check if the body of this def_id can possibly leak a foreign unwind into Rust code. fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool { debug!("has_ffi_unwind_calls({local_def_id:?})"); @@ -103,7 +71,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool { _ => bug!("invalid callee of type {:?}", ty), }; - if layout::fn_can_unwind(tcx, fn_def_id, sig.abi()) && abi_can_unwind(sig.abi()) { + if layout::fn_can_unwind(tcx, fn_def_id, sig.abi()) { // We have detected a call that can possibly leak foreign unwind. // // Because the function body itself can unwind, we are not aborting this function call diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 0f8f28e3462..936a7e2d9de 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -571,11 +571,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let ret = self.ecx.ptr_to_ptr(&src, to).ok()?; ret.into() } - CastKind::PointerCoercion( - ty::adjustment::PointerCoercion::MutToConstPointer - | ty::adjustment::PointerCoercion::ArrayToPointer - | ty::adjustment::PointerCoercion::UnsafeFnPointer, - ) => { + CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer) => { let src = self.evaluated[value].as_ref()?; let src = self.ecx.read_immediate(src).ok()?; let to = self.ecx.layout_of(to).ok()?; @@ -827,25 +823,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { return self.simplify_cast(kind, value, to, location); } Rvalue::BinaryOp(op, box (ref mut lhs, ref mut rhs)) => { - let ty = lhs.ty(self.local_decls, self.tcx); - let lhs = self.simplify_operand(lhs, location); - let rhs = self.simplify_operand(rhs, location); - // Only short-circuit options after we called `simplify_operand` - // on both operands for side effect. 
- let lhs = lhs?; - let rhs = rhs?; - - if let Some(value) = self.simplify_binary(op, ty, lhs, rhs) { - return Some(value); - } - Value::BinaryOp(op, lhs, rhs) + return self.simplify_binary(op, lhs, rhs, location); } - Rvalue::UnaryOp(op, ref mut arg) => { - let arg = self.simplify_operand(arg, location)?; - if let Some(value) = self.simplify_unary(op, arg) { - return Some(value); - } - Value::UnaryOp(op, arg) + Rvalue::UnaryOp(op, ref mut arg_op) => { + return self.simplify_unary(op, arg_op, location); } Rvalue::Discriminant(ref mut place) => { let place = self.simplify_place_value(place, location)?; @@ -953,13 +934,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { was_updated = true; } - if was_updated { - if let Some(const_) = self.try_as_constant(fields[0]) { - field_ops[FieldIdx::ZERO] = Operand::Constant(Box::new(const_)); - } else if let Some(local) = self.try_as_local(fields[0], location) { - field_ops[FieldIdx::ZERO] = Operand::Copy(Place::from(local)); - self.reused_locals.insert(local); - } + if was_updated && let Some(op) = self.try_as_operand(fields[0], location) { + field_ops[FieldIdx::ZERO] = op; } } @@ -969,11 +945,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let first = fields[0]; if fields.iter().all(|&v| v == first) { let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap()); - if let Some(const_) = self.try_as_constant(first) { - *rvalue = Rvalue::Repeat(Operand::Constant(Box::new(const_)), len); - } else if let Some(local) = self.try_as_local(first, location) { - *rvalue = Rvalue::Repeat(Operand::Copy(local.into()), len); - self.reused_locals.insert(local); + if let Some(op) = self.try_as_operand(first, location) { + *rvalue = Rvalue::Repeat(op, len); } return Some(self.insert(Value::Repeat(first, len))); } @@ -983,8 +956,58 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } #[instrument(level = "trace", skip(self), ret)] - fn simplify_unary(&mut self, op: UnOp, value: VnIndex) -> Option<VnIndex> { - let value = match (op, self.get(value)) { + fn simplify_unary( + &mut self, + op: UnOp, + arg_op: &mut Operand<'tcx>, + location: Location, + ) -> Option<VnIndex> { + let mut arg_index = self.simplify_operand(arg_op, location)?; + + // PtrMetadata doesn't care about *const vs *mut vs & vs &mut, + // so start by removing those distinctions so we can update the `Operand` + if op == UnOp::PtrMetadata { + let mut was_updated = false; + loop { + match self.get(arg_index) { + // Pointer casts that preserve metadata, such as + // `*const [i32]` <-> `*mut [i32]` <-> `*mut [f32]`. + // It's critical that this not eliminate cases like + // `*const [T]` -> `*const T` which remove metadata. + // We run on potentially-generic MIR, though, so unlike codegen + // we can't always know exactly what the metadata are. + // To allow things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`, + // it's fine to get a projection as the type. + Value::Cast { kind: CastKind::PtrToPtr, value: inner, from, to } + if self.pointers_have_same_metadata(*from, *to) => + { + arg_index = *inner; + was_updated = true; + continue; + } + + // `&mut *p`, `&raw *p`, etc don't change metadata. 
+ Value::Address { place, kind: _, provenance: _ } + if let PlaceRef { local, projection: [PlaceElem::Deref] } = + place.as_ref() + && let Some(local_index) = self.locals[local] => + { + arg_index = local_index; + was_updated = true; + continue; + } + + _ => { + if was_updated && let Some(op) = self.try_as_operand(arg_index, location) { + *arg_op = op; + } + break; + } + } + } + } + + let value = match (op, self.get(arg_index)) { (UnOp::Not, Value::UnaryOp(UnOp::Not, inner)) => return Some(*inner), (UnOp::Neg, Value::UnaryOp(UnOp::Neg, inner)) => return Some(*inner), (UnOp::Not, Value::BinaryOp(BinOp::Eq, lhs, rhs)) => { @@ -996,9 +1019,26 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => { return Some(fields[1]); } - _ => return None, + // We have an unsizing cast, which assigns the length to fat pointer metadata. + ( + UnOp::PtrMetadata, + Value::Cast { + kind: CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize), + from, + to, + .. + }, + ) if let ty::Slice(..) = to.builtin_deref(true).unwrap().kind() + && let ty::Array(_, len) = from.builtin_deref(true).unwrap().kind() => + { + return self.insert_constant(Const::from_ty_const( + *len, + self.tcx.types.usize, + self.tcx, + )); + } + _ => Value::UnaryOp(op, arg_index), }; - Some(self.insert(value)) } @@ -1006,6 +1046,52 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { fn simplify_binary( &mut self, op: BinOp, + lhs_operand: &mut Operand<'tcx>, + rhs_operand: &mut Operand<'tcx>, + location: Location, + ) -> Option<VnIndex> { + let lhs = self.simplify_operand(lhs_operand, location); + let rhs = self.simplify_operand(rhs_operand, location); + // Only short-circuit options after we called `simplify_operand` + // on both operands for side effect. + let mut lhs = lhs?; + let mut rhs = rhs?; + + let lhs_ty = lhs_operand.ty(self.local_decls, self.tcx); + + // If we're comparing pointers, remove `PtrToPtr` casts if the from + // types of both casts and the metadata all match. + if let BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge = op + && lhs_ty.is_any_ptr() + && let Value::Cast { + kind: CastKind::PtrToPtr, value: lhs_value, from: lhs_from, .. + } = self.get(lhs) + && let Value::Cast { + kind: CastKind::PtrToPtr, value: rhs_value, from: rhs_from, .. 
+ } = self.get(rhs) + && lhs_from == rhs_from + && self.pointers_have_same_metadata(*lhs_from, lhs_ty) + { + lhs = *lhs_value; + rhs = *rhs_value; + if let Some(op) = self.try_as_operand(lhs, location) { + *lhs_operand = op; + } + if let Some(op) = self.try_as_operand(rhs, location) { + *rhs_operand = op; + } + } + + if let Some(value) = self.simplify_binary_inner(op, lhs_ty, lhs, rhs) { + return Some(value); + } + let value = Value::BinaryOp(op, lhs, rhs); + Some(self.insert(value)) + } + + fn simplify_binary_inner( + &mut self, + op: BinOp, lhs_ty: Ty<'tcx>, lhs: VnIndex, rhs: VnIndex, @@ -1164,29 +1250,43 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } } - if let PtrToPtr | PointerCoercion(MutToConstPointer) = kind + // PtrToPtr-then-PtrToPtr can skip the intermediate step + if let PtrToPtr = kind && let Value::Cast { kind: inner_kind, value: inner_value, from: inner_from, to: _ } = *self.get(value) - && let PtrToPtr | PointerCoercion(MutToConstPointer) = inner_kind + && let PtrToPtr = inner_kind { from = inner_from; value = inner_value; - *kind = PtrToPtr; was_updated = true; if inner_from == to { return Some(inner_value); } } - if was_updated { - if let Some(const_) = self.try_as_constant(value) { - *operand = Operand::Constant(Box::new(const_)); - } else if let Some(local) = self.try_as_local(value, location) { - *operand = Operand::Copy(local.into()); - self.reused_locals.insert(local); + // PtrToPtr-then-Transmute can just transmute the original, so long as the + // PtrToPtr didn't change metadata (and thus the size of the pointer) + if let Transmute = kind + && let Value::Cast { + kind: PtrToPtr, + value: inner_value, + from: inner_from, + to: inner_to, + } = *self.get(value) + && self.pointers_have_same_metadata(inner_from, inner_to) + { + from = inner_from; + value = inner_value; + was_updated = true; + if inner_from == to { + return Some(inner_value); } } + if was_updated && let Some(op) = self.try_as_operand(value, location) { + *operand = op; + } + Some(self.insert(Value::Cast { kind: *kind, value, from, to })) } @@ -1230,6 +1330,21 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // Fallback: a symbolic `Len`. Some(self.insert(Value::Len(inner))) } + + fn pointers_have_same_metadata(&self, left_ptr_ty: Ty<'tcx>, right_ptr_ty: Ty<'tcx>) -> bool { + let left_meta_ty = left_ptr_ty.pointee_metadata_ty_or_projection(self.tcx); + let right_meta_ty = right_ptr_ty.pointee_metadata_ty_or_projection(self.tcx); + if left_meta_ty == right_meta_ty { + true + } else if let Ok(left) = + self.tcx.try_normalize_erasing_regions(self.param_env, left_meta_ty) + && let Ok(right) = self.tcx.try_normalize_erasing_regions(self.param_env, right_meta_ty) + { + left == right + } else { + false + } + } } fn op_to_prop_const<'tcx>( @@ -1300,6 +1415,19 @@ fn op_to_prop_const<'tcx>( } impl<'tcx> VnState<'_, 'tcx> { + /// If either [`Self::try_as_constant`] as [`Self::try_as_local`] succeeds, + /// returns that result as an [`Operand`]. + fn try_as_operand(&mut self, index: VnIndex, location: Location) -> Option<Operand<'tcx>> { + if let Some(const_) = self.try_as_constant(index) { + Some(Operand::Constant(Box::new(const_))) + } else if let Some(local) = self.try_as_local(index, location) { + self.reused_locals.insert(local); + Some(Operand::Copy(local.into())) + } else { + None + } + } + /// If `index` is a `Value::Constant`, return the `Constant` to be put in the MIR. fn try_as_constant(&mut self, index: VnIndex) -> Option<ConstOperand<'tcx>> { // This was already constant in MIR, do not change it. 
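To make the `gvn.rs` changes above more concrete, here is a rough surface-level illustration in ordinary Rust (not compiler code) of the patterns the new `PtrMetadata` and pointer-comparison simplifications target. Whether the folds actually fire depends on the MIR produced by earlier passes, so treat this as a sketch of intent rather than a guaranteed optimization.

// The argument of `PtrMetadata` here comes from an array-to-slice unsizing cast,
// so the new `simplify_unary` arm can read the length straight out of the cast's
// source type and fold the whole expression to the constant 4. (`<[_]>::len` is
// itself lowered to `PtrMetadata` by the `lower_slice_len.rs` change later in
// this diff.)
fn slice_len_of_array() -> usize {
    let a = [0u8; 4];
    let s: &[u8] = &a;
    s.len()
}

// Metadata-preserving `PtrToPtr` casts no longer hide the operands from the
// pointer-comparison rule: when both sides are casts from the same type with the
// same metadata, the comparison is rewritten to compare the original pointers.
fn eq_through_casts(p: *mut [i32], q: *mut [i32]) -> bool {
    p as *const [i32] == q as *const [i32]
}

fn main() {
    let mut arr = [1i32, 2, 3];
    let p = &mut arr[..] as *mut [i32];
    println!("len = {}", slice_len_of_array());
    println!("equal = {}", eq_through_casts(p, p));
}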
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index d04bb8d302e..07482d0571a 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -1,4 +1,5 @@ -//! Inlining pass for MIR functions +//! Inlining pass for MIR functions. + use crate::deref_separator::deref_finder; use rustc_attr::InlineAttr; use rustc_hir::def::DefKind; @@ -10,7 +11,7 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs} use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::TypeVisitableExt; -use rustc_middle::ty::{self, Instance, InstanceKind, ParamEnv, Ty, TyCtxt}; +use rustc_middle::ty::{self, Instance, InstanceKind, ParamEnv, Ty, TyCtxt, TypeFlags}; use rustc_session::config::{DebugInfo, OptLevel}; use rustc_span::source_map::Spanned; use rustc_span::sym; @@ -305,6 +306,16 @@ impl<'tcx> Inliner<'tcx> { InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => { return Err("instance without MIR (intrinsic / virtual)"); } + + // FIXME(#127030): `ConstParamHasTy` has bad interactions with + // the drop shim builder, which does not evaluate predicates in + // the correct param-env for types being dropped. Stall resolving + // the MIR for this instance until all of its const params are + // substituted. + InstanceKind::DropGlue(_, Some(ty)) if ty.has_type_flags(TypeFlags::HAS_CT_PARAM) => { + return Err("still needs substitution"); + } + // This cannot result in an immediate cycle since the callee MIR is a shim, which does // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we // do not need to catch this here, we can wait until the inliner decides to continue @@ -623,8 +634,7 @@ impl<'tcx> Inliner<'tcx> { }; // Copy the arguments if needed. - let args: Vec<_> = - self.make_call_args(args, &callsite, caller_body, &callee_body, return_block); + let args = self.make_call_args(args, &callsite, caller_body, &callee_body, return_block); let mut integrator = Integrator { args: &args, @@ -735,12 +745,12 @@ impl<'tcx> Inliner<'tcx> { fn make_call_args( &self, - args: Vec<Spanned<Operand<'tcx>>>, + args: Box<[Spanned<Operand<'tcx>>]>, callsite: &CallSite<'tcx>, caller_body: &mut Body<'tcx>, callee_body: &Body<'tcx>, return_block: Option<BasicBlock>, - ) -> Vec<Local> { + ) -> Box<[Local]> { let tcx = self.tcx; // There is a bit of a mismatch between the *caller* of a closure and the *callee*. @@ -767,7 +777,8 @@ impl<'tcx> Inliner<'tcx> { // // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`. if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() { - let mut args = args.into_iter(); + // FIXME(edition_2024): switch back to a normal method call. + let mut args = <_>::into_iter(args); let self_ = self.create_temp_if_necessary( args.next().unwrap().node, callsite, @@ -801,7 +812,8 @@ impl<'tcx> Inliner<'tcx> { closure_ref_arg.chain(tuple_tmp_args).collect() } else { - args.into_iter() + // FIXME(edition_2024): switch back to a normal method call. + <_>::into_iter(args) .map(|a| self.create_temp_if_necessary(a.node, callsite, caller_body, return_block)) .collect() } diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs index 6806c517c17..8209e5e2711 100644 --- a/compiler/rustc_mir_transform/src/instsimplify.rs +++ b/compiler/rustc_mir_transform/src/instsimplify.rs @@ -1,6 +1,7 @@ //! Performs various peephole optimizations. 
use crate::simplify::simplify_duplicate_switch_targets; +use crate::take_array; use rustc_ast::attr; use rustc_middle::bug; use rustc_middle::mir::*; @@ -285,7 +286,8 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> { return; } - let Some(arg_place) = args.pop().unwrap().node.place() else { return }; + let Ok([arg]) = take_array(args) else { return }; + let Some(arg_place) = arg.node.place() else { return }; statements.push(Statement { source_info: terminator.source_info, diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index afba6781a70..f7056702cb4 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -51,11 +51,11 @@ mod abort_unwinding_calls; mod add_call_guards; mod add_moves_for_packed_drops; mod add_retag; +mod add_subtyping_projections; +mod check_alignment; mod check_const_item_mutation; mod check_packed_ref; -mod remove_place_mention; // This pass is public to allow external drivers to perform MIR cleanup -mod add_subtyping_projections; pub mod cleanup_post_borrowck; mod copy_prop; mod coroutine; @@ -88,12 +88,12 @@ mod lower_slice_len; mod match_branches; mod mentioned_items; mod multiple_return_terminators; -mod normalize_array_len; mod nrvo; mod prettify; mod promote_consts; mod ref_prop; mod remove_noop_landing_pads; +mod remove_place_mention; mod remove_storage_markers; mod remove_uninit_drops; mod remove_unneeded_drops; @@ -103,7 +103,6 @@ mod reveal_all; mod shim; mod ssa; // This pass is public to allow external drivers to perform MIR cleanup -mod check_alignment; pub mod simplify; mod simplify_branches; mod simplify_comparison_integral; @@ -161,8 +160,9 @@ fn remap_mir_for_const_eval_select<'tcx>( } if let ty::FnDef(def_id, _) = *const_.ty().kind() && tcx.is_intrinsic(def_id, sym::const_eval_select) => { - let [tupled_args, called_in_const, called_at_rt]: [_; 3] = - std::mem::take(args).try_into().unwrap(); + let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else { + unreachable!() + }; let ty = tupled_args.node.ty(&body.local_decls, tcx); let fields = ty.tuple_fields(); let num_args = fields.len(); @@ -212,6 +212,11 @@ fn remap_mir_for_const_eval_select<'tcx>( body } +fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> { + let b: Box<[T; N]> = std::mem::take(b).try_into()?; + Ok(*b) +} + fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { tcx.mir_keys(()).contains(&def_id) } @@ -581,9 +586,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &o1(simplify::SimplifyCfg::AfterUnreachableEnumBranching), // Inlining may have introduced a lot of redundant code and a large move pattern. // Now, we need to shrink the generated MIR. - - // Has to run after `slice::len` lowering - &normalize_array_len::NormalizeArrayLen, &ref_prop::ReferencePropagation, &sroa::ScalarReplacementOfAggregates, &match_branches::MatchBranchSimplification, diff --git a/compiler/rustc_mir_transform/src/lint.rs b/compiler/rustc_mir_transform/src/lint.rs index 5b269185e87..3d1e1e48175 100644 --- a/compiler/rustc_mir_transform/src/lint.rs +++ b/compiler/rustc_mir_transform/src/lint.rs @@ -1,6 +1,7 @@ //! This pass statically detects code which has undefined behaviour or is likely to be erroneous. //! It can be used to locate problems in MIR building or optimizations. It assumes that all code //! can be executed, so it has false positives. 
+ use rustc_data_structures::fx::FxHashSet; use rustc_index::bit_set::BitSet; use rustc_middle::mir::visit::{PlaceContext, Visitor}; diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs index 3ffc447217d..6aa90394355 100644 --- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs +++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs @@ -1,5 +1,6 @@ //! Lowers intrinsic calls +use crate::take_array; use rustc_middle::mir::*; use rustc_middle::ty::{self, TyCtxt}; use rustc_middle::{bug, span_bug}; @@ -50,42 +51,34 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } sym::copy_nonoverlapping => { let target = target.unwrap(); - let mut args = args.drain(..); + let Ok([src, dst, count]) = take_array(args) else { + bug!("Wrong arguments for copy_non_overlapping intrinsic"); + }; block.statements.push(Statement { source_info: terminator.source_info, kind: StatementKind::Intrinsic(Box::new( NonDivergingIntrinsic::CopyNonOverlapping( rustc_middle::mir::CopyNonOverlapping { - src: args.next().unwrap().node, - dst: args.next().unwrap().node, - count: args.next().unwrap().node, + src: src.node, + dst: dst.node, + count: count.node, }, ), )), }); - assert_eq!( - args.next(), - None, - "Extra argument for copy_non_overlapping intrinsic" - ); - drop(args); terminator.kind = TerminatorKind::Goto { target }; } sym::assume => { let target = target.unwrap(); - let mut args = args.drain(..); + let Ok([arg]) = take_array(args) else { + bug!("Wrong arguments for assume intrinsic"); + }; block.statements.push(Statement { source_info: terminator.source_info, kind: StatementKind::Intrinsic(Box::new( - NonDivergingIntrinsic::Assume(args.next().unwrap().node), + NonDivergingIntrinsic::Assume(arg.node), )), }); - assert_eq!( - args.next(), - None, - "Extra argument for copy_non_overlapping intrinsic" - ); - drop(args); terminator.kind = TerminatorKind::Goto { target }; } sym::wrapping_add @@ -100,13 +93,9 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { | sym::unchecked_shl | sym::unchecked_shr => { let target = target.unwrap(); - let lhs; - let rhs; - { - let mut args = args.drain(..); - lhs = args.next().unwrap(); - rhs = args.next().unwrap(); - } + let Ok([lhs, rhs]) = take_array(args) else { + bug!("Wrong arguments for {} intrinsic", intrinsic.name); + }; let bin_op = match intrinsic.name { sym::wrapping_add => BinOp::Add, sym::wrapping_sub => BinOp::Sub, @@ -132,13 +121,9 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => { if let Some(target) = *target { - let lhs; - let rhs; - { - let mut args = args.drain(..); - lhs = args.next().unwrap(); - rhs = args.next().unwrap(); - } + let Ok([lhs, rhs]) = take_array(args) else { + bug!("Wrong arguments for {} intrinsic", intrinsic.name); + }; let bin_op = match intrinsic.name { sym::add_with_overflow => BinOp::AddWithOverflow, sym::sub_with_overflow => BinOp::SubWithOverflow, @@ -174,7 +159,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } } sym::read_via_copy => { - let [arg] = args.as_slice() else { + let Ok([arg]) = take_array(args) else { span_bug!(terminator.source_info.span, "Wrong number of arguments"); }; let derefed_place = if let Some(place) = arg.node.place() @@ -207,7 +192,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } sym::write_via_move => { let target = target.unwrap(); - let Ok([ptr, val]) = <[_; 2]>::try_from(std::mem::take(args)) else { + let Ok([ptr, val]) = take_array(args) else { 
span_bug!( terminator.source_info.span, "Wrong number of arguments for write_via_move intrinsic", @@ -247,7 +232,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } sym::offset => { let target = target.unwrap(); - let Ok([ptr, delta]) = <[_; 2]>::try_from(std::mem::take(args)) else { + let Ok([ptr, delta]) = take_array(args) else { span_bug!( terminator.source_info.span, "Wrong number of arguments for offset intrinsic", @@ -264,7 +249,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } sym::transmute | sym::transmute_unchecked => { let dst_ty = destination.ty(local_decls, tcx).ty; - let Ok([arg]) = <[_; 1]>::try_from(std::mem::take(args)) else { + let Ok([arg]) = take_array(args) else { span_bug!( terminator.source_info.span, "Wrong number of arguments for transmute intrinsic", @@ -289,7 +274,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { } } sym::aggregate_raw_ptr => { - let Ok([data, meta]) = <[_; 2]>::try_from(std::mem::take(args)) else { + let Ok([data, meta]) = take_array(args) else { span_bug!( terminator.source_info.span, "Wrong number of arguments for aggregate_raw_ptr intrinsic", @@ -317,7 +302,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { terminator.kind = TerminatorKind::Goto { target }; } sym::ptr_metadata => { - let Ok([ptr]) = <[_; 1]>::try_from(std::mem::take(args)) else { + let Ok([ptr]) = take_array(args) else { span_bug!( terminator.source_info.span, "Wrong number of arguments for ptr_metadata intrinsic", diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs index 2267a621a83..77a7f4f47dd 100644 --- a/compiler/rustc_mir_transform/src/lower_slice_len.rs +++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs @@ -1,10 +1,9 @@ -//! This pass lowers calls to core::slice::len to just Len op. +//! This pass lowers calls to core::slice::len to just PtrMetadata op. //! It should run before inlining! use rustc_hir::def_id::DefId; -use rustc_index::IndexSlice; use rustc_middle::mir::*; -use rustc_middle::ty::{self, TyCtxt}; +use rustc_middle::ty::TyCtxt; pub struct LowerSliceLenCalls; @@ -29,16 +28,11 @@ pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let basic_blocks = body.basic_blocks.as_mut_preserves_cfg(); for block in basic_blocks { // lower `<[_]>::len` calls - lower_slice_len_call(tcx, block, &body.local_decls, slice_len_fn_item_def_id); + lower_slice_len_call(block, slice_len_fn_item_def_id); } } -fn lower_slice_len_call<'tcx>( - tcx: TyCtxt<'tcx>, - block: &mut BasicBlockData<'tcx>, - local_decls: &IndexSlice<Local, LocalDecl<'tcx>>, - slice_len_fn_item_def_id: DefId, -) { +fn lower_slice_len_call<'tcx>(block: &mut BasicBlockData<'tcx>, slice_len_fn_item_def_id: DefId) { let terminator = block.terminator(); if let TerminatorKind::Call { func, @@ -50,19 +44,17 @@ fn lower_slice_len_call<'tcx>( } = &terminator.kind // some heuristics for fast rejection && let [arg] = &args[..] 
- && let Some(arg) = arg.node.place() - && let ty::FnDef(fn_def_id, _) = func.ty(local_decls, tcx).kind() - && *fn_def_id == slice_len_fn_item_def_id + && let Some((fn_def_id, _)) = func.const_fn_def() + && fn_def_id == slice_len_fn_item_def_id { // perform modifications from something like: // _5 = core::slice::<impl [u8]>::len(move _6) -> bb1 // into: - // _5 = Len(*_6) + // _5 = PtrMetadata(move _6) // goto bb1 // make new RValue for Len - let deref_arg = tcx.mk_place_deref(arg); - let r_value = Rvalue::Len(deref_arg); + let r_value = Rvalue::UnaryOp(UnOp::PtrMetadata, arg.node.clone()); let len_statement_kind = StatementKind::Assign(Box::new((*destination, r_value))); let add_statement = Statement { kind: len_statement_kind, source_info: terminator.source_info }; diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs deleted file mode 100644 index d5e72706661..00000000000 --- a/compiler/rustc_mir_transform/src/normalize_array_len.rs +++ /dev/null @@ -1,103 +0,0 @@ -//! This pass eliminates casting of arrays into slices when their length -//! is taken using `.len()` method. Handy to preserve information in MIR for const prop - -use crate::ssa::SsaLocals; -use rustc_index::IndexVec; -use rustc_middle::mir::visit::*; -use rustc_middle::mir::*; -use rustc_middle::ty::{self, TyCtxt}; - -pub struct NormalizeArrayLen; - -impl<'tcx> MirPass<'tcx> for NormalizeArrayLen { - fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - sess.mir_opt_level() >= 3 - } - - #[instrument(level = "trace", skip(self, tcx, body))] - fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - debug!(def_id = ?body.source.def_id()); - normalize_array_len_calls(tcx, body) - } -} - -fn normalize_array_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); - let ssa = SsaLocals::new(tcx, body, param_env); - - let slice_lengths = compute_slice_length(tcx, &ssa, body); - debug!(?slice_lengths); - - Replacer { tcx, slice_lengths }.visit_body_preserves_cfg(body); -} - -fn compute_slice_length<'tcx>( - tcx: TyCtxt<'tcx>, - ssa: &SsaLocals, - body: &Body<'tcx>, -) -> IndexVec<Local, Option<ty::Const<'tcx>>> { - let mut slice_lengths = IndexVec::from_elem(None, &body.local_decls); - - for (local, rvalue, _) in ssa.assignments(body) { - match rvalue { - Rvalue::Cast( - CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize), - operand, - cast_ty, - ) => { - let operand_ty = operand.ty(body, tcx); - debug!(?operand_ty); - if let Some(operand_ty) = operand_ty.builtin_deref(true) - && let ty::Array(_, len) = operand_ty.kind() - && let Some(cast_ty) = cast_ty.builtin_deref(true) - && let ty::Slice(..) = cast_ty.kind() - { - slice_lengths[local] = Some(*len); - } - } - // The length information is stored in the fat pointer, so we treat `operand` as a value. - Rvalue::Use(operand) => { - if let Some(rhs) = operand.place() - && let Some(rhs) = rhs.as_local() - { - slice_lengths[local] = slice_lengths[rhs]; - } - } - // The length information is stored in the fat pointer. - // Reborrowing copies length information from one pointer to the other. - Rvalue::Ref(_, _, rhs) | Rvalue::AddressOf(_, rhs) => { - if let [PlaceElem::Deref] = rhs.projection[..] 
{ - slice_lengths[local] = slice_lengths[rhs.local]; - } - } - _ => {} - } - } - - slice_lengths -} - -struct Replacer<'tcx> { - tcx: TyCtxt<'tcx>, - slice_lengths: IndexVec<Local, Option<ty::Const<'tcx>>>, -} - -impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> { - fn tcx(&self) -> TyCtxt<'tcx> { - self.tcx - } - - fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, loc: Location) { - if let Rvalue::Len(place) = rvalue - && let [PlaceElem::Deref] = &place.projection[..] - && let Some(len) = self.slice_lengths[place.local] - { - *rvalue = Rvalue::Use(Operand::Constant(Box::new(ConstOperand { - span: rustc_span::DUMMY_SP, - user_ty: None, - const_: Const::from_ty_const(len, self.tcx.types.usize, self.tcx), - }))); - } - self.super_rvalue(rvalue, loc); - } -} diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index ecdca8292b4..3f4d2b65ff2 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -60,7 +60,7 @@ impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> { let ccx = ConstCx::new(tcx, body); let (mut temps, all_candidates) = collect_temps_and_candidates(&ccx); - let promotable_candidates = validate_candidates(&ccx, &mut temps, &all_candidates); + let promotable_candidates = validate_candidates(&ccx, &mut temps, all_candidates); let promoted = promote_candidates(body, tcx, temps, promotable_candidates); self.promoted_fragments.set(promoted); @@ -98,8 +98,8 @@ struct Collector<'a, 'tcx> { } impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> { + #[instrument(level = "debug", skip(self))] fn visit_local(&mut self, index: Local, context: PlaceContext, location: Location) { - debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location); // We're only interested in temporaries and the return place match self.ccx.body.local_kind(index) { LocalKind::Arg => return, @@ -111,20 +111,15 @@ impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> { // then it's constant and thus drop is noop. // Non-uses are also irrelevant. 
if context.is_drop() || !context.is_use() { - debug!( - "visit_local: context.is_drop={:?} context.is_use={:?}", - context.is_drop(), - context.is_use(), - ); + debug!(is_drop = context.is_drop(), is_use = context.is_use()); return; } let temp = &mut self.temps[index]; - debug!("visit_local: temp={:?}", temp); + debug!(?temp); *temp = match *temp { TempState::Undefined => match context { - PlaceContext::MutatingUse(MutatingUseContext::Store) - | PlaceContext::MutatingUse(MutatingUseContext::Call) => { + PlaceContext::MutatingUse(MutatingUseContext::Store | MutatingUseContext::Call) => { TempState::Defined { location, uses: 0, valid: Err(()) } } _ => TempState::Unpromotable, @@ -137,7 +132,7 @@ impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> { | PlaceContext::NonMutatingUse(_) => true, PlaceContext::MutatingUse(_) | PlaceContext::NonUse(_) => false, }; - debug!("visit_local: allowed_use={:?}", allowed_use); + debug!(?allowed_use); if allowed_use { *uses += 1; return; @@ -146,6 +141,7 @@ impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> { } TempState::Unpromotable | TempState::PromotedOut => TempState::Unpromotable, }; + debug!(?temp); } fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { @@ -695,15 +691,12 @@ impl<'tcx> Validator<'_, 'tcx> { fn validate_candidates( ccx: &ConstCx<'_, '_>, temps: &mut IndexSlice<Local, TempState>, - candidates: &[Candidate], + mut candidates: Vec<Candidate>, ) -> Vec<Candidate> { let mut validator = Validator { ccx, temps, promotion_safe_blocks: None }; + candidates.retain(|&candidate| validator.validate_candidate(candidate).is_ok()); candidates - .iter() - .copied() - .filter(|&candidate| validator.validate_candidate(candidate).is_ok()) - .collect() } struct Promoter<'a, 'tcx> { @@ -972,7 +965,12 @@ fn promote_candidates<'tcx>( candidates: Vec<Candidate>, ) -> IndexVec<Promoted, Body<'tcx>> { // Visit candidates in reverse, in case they're nested. 
- debug!("promote_candidates({:?})", candidates); + debug!(promote_candidates = ?candidates); + + // eagerly fail fast + if candidates.is_empty() { + return IndexVec::new(); + } let mut promotions = IndexVec::new(); diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index 825f8957187..25577e88e28 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -562,7 +562,7 @@ impl<'tcx> CloneShimBuilder<'tcx> { vec![statement], TerminatorKind::Call { func, - args: vec![Spanned { node: Operand::Move(ref_loc), span: DUMMY_SP }], + args: [Spanned { node: Operand::Move(ref_loc), span: DUMMY_SP }].into(), destination: dest, target: Some(next), unwind: UnwindAction::Cleanup(cleanup), @@ -843,7 +843,7 @@ fn build_call_shim<'tcx>( }; // BB #0 - let args = args.into_iter().map(|a| Spanned { node: a, span: DUMMY_SP }).collect::<Vec<_>>(); + let args = args.into_iter().map(|a| Spanned { node: a, span: DUMMY_SP }).collect(); block( &mut blocks, statements, diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs index 2cca1a6f507..2018a8fe667 100644 --- a/compiler/rustc_mir_transform/src/validate.rs +++ b/compiler/rustc_mir_transform/src/validate.rs @@ -1116,12 +1116,17 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { UnOp::PtrMetadata => { if !matches!(self.mir_phase, MirPhase::Runtime(_)) { // It would probably be fine to support this in earlier phases, - // but at the time of writing it's only ever introduced from intrinsic lowering, + // but at the time of writing it's only ever introduced from intrinsic lowering + // or other runtime-phase optimization passes, // so earlier things can just `bug!` on it. self.fail(location, "PtrMetadata should be in runtime MIR only"); } - check_kinds!(a, "Cannot PtrMetadata non-pointer type {:?}", ty::RawPtr(..)); + check_kinds!( + a, + "Cannot PtrMetadata non-pointer non-reference type {:?}", + ty::RawPtr(..) | ty::Ref(..) + ); } } } @@ -1188,6 +1193,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { "CastKind::{kind:?} output must be a raw const pointer, not {:?}", ty::RawPtr(_, Mutability::Not) ); + if self.mir_phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup) { + self.fail(location, format!("After borrowck, MIR disallows {kind:?}")); + } } CastKind::PointerCoercion(PointerCoercion::ArrayToPointer) => { // FIXME: Check pointee types @@ -1201,6 +1209,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { "CastKind::{kind:?} output must be a raw pointer, not {:?}", ty::RawPtr(..) ); + if self.mir_phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup) { + self.fail(location, format!("After borrowck, MIR disallows {kind:?}")); + } } CastKind::PointerCoercion(PointerCoercion::Unsize) => { // This is used for all `CoerceUnsized` types, @@ -1212,7 +1223,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { if !input_valid || !target_valid { self.fail( location, - format!("Wrong cast kind {kind:?} for the type {op_ty}",), + format!("Wrong cast kind {kind:?} for the type {op_ty}"), ); } } |
