Diffstat (limited to 'compiler/rustc_mir_transform/src')
36 files changed, 545 insertions, 474 deletions
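Editor's note: the bulk of this diff is a mechanical migration from the `Body::basic_blocks()` accessor to a public `basic_blocks` field whose wrapper type derefs to the block list and memoizes CFG-derived data, invalidating it on CFG-mutating access. Below is a minimal sketch of that pattern with simplified stand-in types; the method names (`as_mut`, `as_mut_preserves_cfg`) follow the hunks that follow, but the struct internals are assumptions, not the real `rustc_middle` definitions.

use std::cell::OnceCell;
use std::ops::Deref;

// Stand-ins for the real rustc_middle types (assumed, simplified).
type BasicBlock = usize;
#[derive(Clone, Default)]
pub struct BasicBlockData;

/// Read-only view of the block list that can memoize CFG-derived data.
/// `Deref` is what lets callers write `body.basic_blocks.len()` or
/// `.iter_enumerated()` directly, and spell a deep copy as
/// `(*body.basic_blocks).clone()`.
#[derive(Clone, Default)]
pub struct BasicBlocks {
    blocks: Vec<BasicBlockData>,
    predecessor_cache: OnceCell<Vec<Vec<BasicBlock>>>,
}

impl Deref for BasicBlocks {
    type Target = Vec<BasicBlockData>;
    fn deref(&self) -> &Self::Target {
        &self.blocks
    }
}

impl BasicBlocks {
    /// Lazily computed, cached CFG data.
    pub fn predecessors(&self) -> &Vec<Vec<BasicBlock>> {
        self.predecessor_cache.get_or_init(|| {
            // The real implementation walks every terminator's successors.
            vec![Vec::new(); self.blocks.len()]
        })
    }

    /// Mutable access that may change the CFG: drop the memoized data.
    pub fn as_mut(&mut self) -> &mut Vec<BasicBlockData> {
        self.predecessor_cache = OnceCell::new();
        &mut self.blocks
    }

    /// Mutable access for statement-level patches that keep the CFG
    /// intact, so any caches stay valid.
    pub fn as_mut_preserves_cfg(&mut self) -> &mut Vec<BasicBlockData> {
        &mut self.blocks
    }
}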
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 2502e8b603c..d8f85d2e379 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -56,7 +56,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
         // example.
         let mut calls_to_terminate = Vec::new();
         let mut cleanups_to_remove = Vec::new();
-        for (id, block) in body.basic_blocks().iter_enumerated() {
+        for (id, block) in body.basic_blocks.iter_enumerated() {
             if block.is_cleanup {
                 continue;
             }
diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs
index f12c8560c0e..30966d22e2f 100644
--- a/compiler/rustc_mir_transform/src/add_call_guards.rs
+++ b/compiler/rustc_mir_transform/src/add_call_guards.rs
@@ -45,7 +45,7 @@ impl AddCallGuards {
         // We need a place to store the new blocks generated
         let mut new_blocks = Vec::new();

-        let cur_len = body.basic_blocks().len();
+        let cur_len = body.basic_blocks.len();

         for block in body.basic_blocks_mut() {
             match block.terminator {
diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
index 8de0aad041c..ffb5d8c6d95 100644
--- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
+++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
@@ -55,7 +55,7 @@ fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>)
     let mut patch = MirPatch::new(body);
     let param_env = tcx.param_env(def_id);

-    for (bb, data) in body.basic_blocks().iter_enumerated() {
+    for (bb, data) in body.basic_blocks.iter_enumerated() {
         let loc = Location { block: bb, statement_index: data.statements.len() };
         let terminator = data.terminator();
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index 9c5896c4e4a..036b5589849 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -66,7 +66,6 @@ impl<'tcx> MirPass<'tcx> for AddRetag {
         // We need an `AllCallEdges` pass before we can do any work.
         super::add_call_guards::AllCallEdges.run_pass(tcx, body);

-        let (span, arg_count) = (body.span, body.arg_count);
         let basic_blocks = body.basic_blocks.as_mut();
         let local_decls = &body.local_decls;
         let needs_retag = |place: &Place<'tcx>| {
@@ -90,20 +89,18 @@ impl<'tcx> MirPass<'tcx> for AddRetag {
         // PART 1
         // Retag arguments at the beginning of the start block.
         {
-            // FIXME: Consider using just the span covering the function
-            // argument declaration.
-            let source_info = SourceInfo::outermost(span);
             // Gather all arguments, skip return value.
-            let places = local_decls
-                .iter_enumerated()
-                .skip(1)
-                .take(arg_count)
-                .map(|(local, _)| Place::from(local))
-                .filter(needs_retag);
+            let places = local_decls.iter_enumerated().skip(1).take(body.arg_count).filter_map(
+                |(local, decl)| {
+                    let place = Place::from(local);
+                    needs_retag(&place).then_some((place, decl.source_info))
+                },
+            );
+
             // Emit their retags.
             basic_blocks[START_BLOCK].statements.splice(
                 0..0,
-                places.map(|place| Statement {
+                places.map(|(place, source_info)| Statement {
                     source_info,
                     kind: StatementKind::Retag(RetagKind::FnEntry, Box::new(place)),
                 }),
diff --git a/compiler/rustc_mir_transform/src/const_goto.rs b/compiler/rustc_mir_transform/src/const_goto.rs
index 5acf939f06b..0a305a40209 100644
--- a/compiler/rustc_mir_transform/src/const_goto.rs
+++ b/compiler/rustc_mir_transform/src/const_goto.rs
@@ -61,14 +61,14 @@ impl<'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'_, 'tcx> {
         let _: Option<_> = try {
             let target = terminator.kind.as_goto()?;
             // We only apply this optimization if the last statement is a const assignment
-            let last_statement = self.body.basic_blocks()[location.block].statements.last()?;
+            let last_statement = self.body.basic_blocks[location.block].statements.last()?;
             if let (place, Rvalue::Use(Operand::Constant(_const))) =
                 last_statement.kind.as_assign()?
             {
                 // We found a constant being assigned to `place`.
                 // Now check that the target of this Goto switches on this place.
-                let target_bb = &self.body.basic_blocks()[target];
+                let target_bb = &self.body.basic_blocks[target];
                 // The `StorageDead(..)` statement does not affect the functionality of mir.
                 // We can move this part of the statement up to the predecessor.
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 1c087b93b49..53f33a7a0ba 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -28,7 +28,7 @@ use crate::MirPass;
 use rustc_const_eval::interpret::{
     self, compile_time_machine, AllocId, ConstAllocation, ConstValue, CtfeValidationMode, Frame,
     ImmTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, PlaceTy,
-    Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
+    Pointer, Scalar, StackPopCleanup, StackPopUnwind,
 };

 /// The maximum number of bytes that we'll allocate space for a local or the return value.
@@ -131,7 +131,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {

         let dummy_body = &Body::new(
             body.source,
-            body.basic_blocks().clone(),
+            (*body.basic_blocks).clone(),
             body.source_scopes.clone(),
             body.local_decls.clone(),
             Default::default(),
@@ -243,24 +243,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
         throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
     }

-    fn access_local<'a>(
-        frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
-        local: Local,
-    ) -> InterpResult<'tcx, &'a interpret::Operand<Self::Provenance>> {
-        let l = &frame.locals[local];
-
-        if matches!(
-            l.value,
-            LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit))
-        ) {
-            // For us "uninit" means "we don't know its value, might be initiailized or not".
-            // So stop here.
-            throw_machine_stop_str!("tried to access a local with unknown value ")
-        }
-
-        l.access()
-    }
-
     fn access_local_mut<'a>(
         ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
         frame: usize,
@@ -431,7 +413,13 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {

     fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
         let op = match self.ecx.eval_place_to_op(place, None) {
-            Ok(op) => op,
+            Ok(op) => {
+                if matches!(*op, interpret::Operand::Immediate(Immediate::Uninit)) {
+                    // Make sure nobody accidentally uses this value.
+                    return None;
+                }
+                op
+            }
             Err(e) => {
                 trace!("get_const failed: {}", e);
                 return None;
@@ -440,7 +428,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {

         // Try to read the local as an immediate so that if it is representable as a scalar, we can
         // handle it as such, but otherwise, just return the value as is.
-        Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+        Some(match self.ecx.read_immediate_raw(&op) {
             Ok(Ok(imm)) => imm.into(),
             _ => op,
         })
@@ -532,8 +520,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let left_ty = left.ty(self.local_decls, self.tcx);
         let left_size = self.ecx.layout_of(left_ty).ok()?.size;
         let right_size = r.layout.size;
-        let r_bits = r.to_scalar().ok();
-        let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+        let r_bits = r.to_scalar().to_bits(right_size).ok();
         if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
             return None;
         }
@@ -562,7 +549,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 // and use it to do const-prop here and everywhere else
                 // where it makes sense.
                 if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
-                    ScalarMaybeUninit::Scalar(scalar),
+                    scalar,
                 )) = *value
                 {
                     *operand = self.operand_from_scalar(
@@ -644,6 +631,14 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         if rvalue.needs_subst() {
             return None;
         }
+        if !rvalue
+            .ty(&self.ecx.frame().body.local_decls, *self.ecx.tcx)
+            .is_sized(self.ecx.tcx, self.param_env)
+        {
+            // the interpreter doesn't support unsized locals (only unsized arguments),
+            // but rustc does (in a kinda broken way), so we have to skip them here
+            return None;
+        }

         if self.tcx.sess.mir_opt_level() >= 4 {
             self.eval_rvalue_with_identities(rvalue, place)
@@ -661,21 +656,23 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         self.use_ecx(|this| match rvalue {
             Rvalue::BinaryOp(op, box (left, right))
             | Rvalue::CheckedBinaryOp(op, box (left, right)) => {
-                let l = this.ecx.eval_operand(left, None);
-                let r = this.ecx.eval_operand(right, None);
+                let l = this.ecx.eval_operand(left, None).and_then(|x| this.ecx.read_immediate(&x));
+                let r =
+                    this.ecx.eval_operand(right, None).and_then(|x| this.ecx.read_immediate(&x));

                 let const_arg = match (l, r) {
-                    (Ok(ref x), Err(_)) | (Err(_), Ok(ref x)) => this.ecx.read_immediate(x)?,
-                    (Err(e), Err(_)) => return Err(e),
-                    (Ok(_), Ok(_)) => return this.ecx.eval_rvalue_into_place(rvalue, place),
+                    (Ok(x), Err(_)) | (Err(_), Ok(x)) => x, // exactly one side is known
+                    (Err(e), Err(_)) => return Err(e),      // neither side is known
+                    (Ok(_), Ok(_)) => return this.ecx.eval_rvalue_into_place(rvalue, place), // both sides are known
                 };

                 if !matches!(const_arg.layout.abi, abi::Abi::Scalar(..)) {
                     // We cannot handle Scalar Pair stuff.
-                    return this.ecx.eval_rvalue_into_place(rvalue, place);
+                    // No point in calling `eval_rvalue_into_place`, since only one side is known
+                    throw_machine_stop_str!("cannot optimize this")
                 }

-                let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
+                let arg_value = const_arg.to_scalar().to_bits(const_arg.layout.size)?;
                 let dest = this.ecx.eval_place(place)?;

                 match op {
@@ -689,7 +686,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     BinOp::Mul if const_arg.layout.ty.is_integral() && arg_value == 0 => {
                         if let Rvalue::CheckedBinaryOp(_, _) = rvalue {
                             let val = Immediate::ScalarPair(
-                                const_arg.to_scalar()?.into(),
+                                const_arg.to_scalar().into(),
                                 Scalar::from_bool(false).into(),
                             );
                             this.ecx.write_immediate(val, &dest)
@@ -697,7 +694,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                             this.ecx.write_immediate(*const_arg, &dest)
                         }
                     }
-                    _ => this.ecx.eval_rvalue_into_place(rvalue, place),
+                    _ => throw_machine_stop_str!("cannot optimize this"),
                 }
             }
             _ => this.ecx.eval_rvalue_into_place(rvalue, place),
@@ -743,21 +740,18 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         }

         // FIXME> figure out what to do when read_immediate_raw fails
-        let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value, /*force*/ false));
+        let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value));

         if let Some(Ok(imm)) = imm {
             match *imm {
-                interpret::Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar)) => {
+                interpret::Immediate::Scalar(scalar) => {
                     *rval = Rvalue::Use(self.operand_from_scalar(
                         scalar,
                         value.layout.ty,
                         source_info.span,
                     ));
                 }
-                Immediate::ScalarPair(
-                    ScalarMaybeUninit::Scalar(_),
-                    ScalarMaybeUninit::Scalar(_),
-                ) => {
+                Immediate::ScalarPair(..) => {
                     // Found a value represented as a pair. For now only do const-prop if the type
                     // of `rvalue` is also a tuple with two scalars.
                     // FIXME: enable the general case stated above ^.
@@ -812,13 +806,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         }

         match **op {
-            interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
-                s.try_to_int().is_ok()
+            interpret::Operand::Immediate(Immediate::Scalar(s)) => s.try_to_int().is_ok(),
+            interpret::Operand::Immediate(Immediate::ScalarPair(l, r)) => {
+                l.try_to_int().is_ok() && r.try_to_int().is_ok()
             }
-            interpret::Operand::Immediate(Immediate::ScalarPair(
-                ScalarMaybeUninit::Scalar(l),
-                ScalarMaybeUninit::Scalar(r),
-            )) => l.try_to_int().is_ok() && r.try_to_int().is_ok(),
             _ => false,
         }
     }
@@ -1079,8 +1070,12 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::Assert { expected, ref mut cond, .. } => {
                 if let Some(ref value) = self.eval_operand(&cond) {
                     trace!("assertion on {:?} should be {:?}", value, expected);
-                    let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
-                    let value_const = self.ecx.read_scalar(&value).unwrap();
+                    let expected = Scalar::from_bool(*expected);
+                    let Ok(value_const) = self.ecx.read_scalar(&value) else {
+                        // FIXME should be used use_ecx rather than a local match... but we have
+                        // quite a few of these read_scalar/read_immediate that need fixing.
+                        return
+                    };
                     if expected != value_const {
                         // Poison all places this operand references so that further code
                         // doesn't use the invalid value
@@ -1092,13 +1087,11 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
                     }
                 } else {
                     if self.should_const_prop(value) {
-                        if let ScalarMaybeUninit::Scalar(scalar) = value_const {
-                            *cond = self.operand_from_scalar(
-                                scalar,
-                                self.tcx.types.bool,
-                                source_info.span,
-                            );
-                        }
+                        *cond = self.operand_from_scalar(
+                            value_const,
+                            self.tcx.types.bool,
+                            source_info.span,
+                        );
                     }
                 }
             }
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index c2ea55af48a..082d6c9f07e 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -6,9 +6,9 @@ use crate::const_prop::ConstPropMachine;
 use crate::const_prop::ConstPropMode;
 use crate::MirLint;
 use rustc_const_eval::const_eval::ConstEvalErr;
+use rustc_const_eval::interpret::Immediate;
 use rustc_const_eval::interpret::{
-    self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar,
-    ScalarMaybeUninit, StackPopCleanup,
+    self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar, StackPopCleanup,
 };
 use rustc_hir::def::DefKind;
 use rustc_hir::HirId;
@@ -106,7 +106,7 @@ impl<'tcx> MirLint<'tcx> for ConstProp {

         let dummy_body = &Body::new(
             body.source,
-            body.basic_blocks().clone(),
+            (*body.basic_blocks).clone(),
             body.source_scopes.clone(),
             body.local_decls.clone(),
             Default::default(),
@@ -230,7 +230,13 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {

     fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
         let op = match self.ecx.eval_place_to_op(place, None) {
-            Ok(op) => op,
+            Ok(op) => {
+                if matches!(*op, interpret::Operand::Immediate(Immediate::Uninit)) {
+                    // Make sure nobody accidentally uses this value.
+                    return None;
+                }
+                op
+            }
             Err(e) => {
                 trace!("get_const failed: {}", e);
                 return None;
@@ -239,7 +245,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {

         // Try to read the local as an immediate so that if it is representable as a scalar, we can
         // handle it as such, but otherwise, just return the value as is.
-        Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+        Some(match self.ecx.read_immediate_raw(&op) {
             Ok(Ok(imm)) => imm.into(),
             _ => op,
         })
@@ -401,8 +407,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let left_ty = left.ty(self.local_decls, self.tcx);
         let left_size = self.ecx.layout_of(left_ty).ok()?.size;
         let right_size = r.layout.size;
-        let r_bits = r.to_scalar().ok();
-        let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+        let r_bits = r.to_scalar().to_bits(right_size).ok();
         if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
             debug!("check_binary_op: reporting assert for {:?}", source_info);
             self.report_assert_as_lint(
@@ -517,6 +522,14 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         if rvalue.needs_subst() {
             return None;
         }
+        if !rvalue
+            .ty(&self.ecx.frame().body.local_decls, *self.ecx.tcx)
+            .is_sized(self.ecx.tcx, self.param_env)
+        {
+            // the interpreter doesn't support unsized locals (only unsized arguments),
+            // but rustc does (in a kinda broken way), so we have to skip them here
+            return None;
+        }

         self.use_ecx(source_info, |this| this.ecx.eval_rvalue_into_place(rvalue, place))
     }
@@ -524,7 +537,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
     fn visit_body(&mut self, body: &Body<'tcx>) {
-        for (bb, data) in body.basic_blocks().iter_enumerated() {
+        for (bb, data) in body.basic_blocks.iter_enumerated() {
             self.visit_basic_block_data(bb, data);
         }
     }
@@ -625,8 +638,12 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::Assert { expected, ref msg, ref cond, .. } => {
                 if let Some(ref value) = self.eval_operand(&cond, source_info) {
                     trace!("assertion on {:?} should be {:?}", value, expected);
-                    let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
-                    let value_const = self.ecx.read_scalar(&value).unwrap();
+                    let expected = Scalar::from_bool(*expected);
+                    let Ok(value_const) = self.ecx.read_scalar(&value) else {
+                        // FIXME should be used use_ecx rather than a local match... but we have
+                        // quite a few of these read_scalar/read_immediate that need fixing.
+                        return
+                    };
                     if expected != value_const {
                         enum DbgVal<T> {
                             Val(T),
@@ -643,9 +660,9 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
                         let mut eval_to_int = |op| {
                             // This can be `None` if the lhs wasn't const propagated and we just
                             // triggered the assert on the value of the rhs.
-                            self.eval_operand(op, source_info).map_or(DbgVal::Underscore, |op| {
-                                DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int())
-                            })
+                            self.eval_operand(op, source_info)
+                                .and_then(|op| self.ecx.read_immediate(&op).ok())
+                                .map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
                         };
                         let msg = match msg {
                             AssertKind::DivisionByZero(op) => {
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 759ea7cd328..782129be088 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -713,7 +713,7 @@ impl<

         ShortCircuitPreorder {
             body,
-            visited: BitSet::new_empty(body.basic_blocks().len()),
+            visited: BitSet::new_empty(body.basic_blocks.len()),
             worklist,
             filtered_successors,
         }
@@ -747,7 +747,7 @@ impl<
     }

     fn size_hint(&self) -> (usize, Option<usize>) {
-        let size = self.body.basic_blocks().len() - self.visited.count();
+        let size = self.body.basic_blocks.len() - self.visited.count();
         (size, Some(size))
     }
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 2619626a567..60481014488 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -80,7 +80,7 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
             return;
         }

-        match mir_body.basic_blocks()[mir::START_BLOCK].terminator().kind {
+        match mir_body.basic_blocks[mir::START_BLOCK].terminator().kind {
             TerminatorKind::Unreachable => {
                 trace!("InstrumentCoverage skipped for unreachable `START_BLOCK`");
                 return;
@@ -541,7 +541,7 @@ fn fn_sig_and_body<'tcx>(
     // to HIR for it.
     let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
     let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
-    (hir::map::fn_sig(hir_node), tcx.hir().body(fn_body_id))
+    (hir_node.fn_sig(), tcx.hir().body(fn_body_id))
 }

 fn get_body_span<'tcx>(
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 9d02f58ae65..dc1e68b253e 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -84,7 +84,7 @@ impl CoverageVisitor {
     }

     fn visit_body(&mut self, body: &Body<'_>) {
-        for bb_data in body.basic_blocks().iter() {
+        for bb_data in body.basic_blocks.iter() {
             for statement in bb_data.statements.iter() {
                 if let StatementKind::Coverage(box ref coverage) = statement.kind {
                     if is_inlined(body, statement) {
@@ -138,7 +138,7 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) ->
 fn covered_code_regions<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<&'tcx CodeRegion> {
     let body = mir_body(tcx, def_id);
-    body.basic_blocks()
+    body.basic_blocks
         .iter()
         .flat_map(|data| {
             data.statements.iter().filter_map(|statement| match statement.kind {
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 6380f03528a..9c9ed5fa510 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -176,7 +176,7 @@ fn debug_basic_blocks<'tcx>(mir_body: &Body<'tcx>) -> String {
     format!(
         "{:?}",
         mir_body
-            .basic_blocks()
+            .basic_blocks
            .iter_enumerated()
            .map(|(bb, data)| {
                let term = &data.terminator();
@@ -213,7 +213,7 @@ fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) {
             "digraph {} {{\n{}\n}}",
             name,
             mir_body
-                .basic_blocks()
+                .basic_blocks
                 .iter_enumerated()
                 .map(|(bb, data)| {
                     format!(
@@ -653,7 +653,7 @@ fn test_traverse_coverage_with_loops() {
 fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
     let mut some_span: Option<Span> = None;
-    for (_, data) in mir_body.basic_blocks().iter_enumerated() {
+    for (_, data) in mir_body.basic_blocks.iter_enumerated() {
         let term_span = data.terminator().source_info.span;
         if let Some(span) = some_span.as_mut() {
             *span = span.to(term_span);
diff --git a/compiler/rustc_mir_transform/src/deaggregator.rs b/compiler/rustc_mir_transform/src/deaggregator.rs
index b93fe5879f4..fe272de20f8 100644
--- a/compiler/rustc_mir_transform/src/deaggregator.rs
+++ b/compiler/rustc_mir_transform/src/deaggregator.rs
@@ -6,10 +6,6 @@ use rustc_middle::ty::TyCtxt;
 pub struct Deaggregator;

 impl<'tcx> MirPass<'tcx> for Deaggregator {
-    fn phase_change(&self) -> Option<MirPhase> {
-        Some(MirPhase::Deaggregated)
-    }
-
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
         for bb in basic_blocks {
diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
index d1977ed49fe..909116a77f5 100644
--- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
+++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
@@ -58,7 +58,7 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
     let mut duplicates = FxHashMap::default();

     let bbs_to_go_through =
-        body.basic_blocks().iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
+        body.basic_blocks.iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();

     let mut same_hashes =
         FxHashMap::with_capacity_and_hasher(bbs_to_go_through, Default::default());
@@ -71,8 +71,7 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
     // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
     // with replacement bb3.
     // When the duplicates are removed, we will end up with only bb3.
-    for (bb, bbd) in body.basic_blocks().iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup)
-    {
+    for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) {
         // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
         // that are unlikely to have duplicates, we stop early. The early bail number has been
         // found experimentally by eprintln while compiling the crates in the rustc-perf suite.
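Editor's note: the const_prop and const_prop_lint hunks above track the removal of `ScalarMaybeUninit` from the interpreter. A possibly-uninitialized value is now an `Immediate::Uninit` variant, filtered out once in `get_const`, instead of every consumer unwrapping a `ScalarMaybeUninit::Scalar(..)` layer. A minimal sketch of that shape with simplified stand-in types (assumptions, not the real `rustc_const_eval` definitions):

// Simplified stand-ins for the interpreter types touched above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Scalar {
    Int(u128),
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Immediate {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    // With `ScalarMaybeUninit` gone, "maybe uninitialized" is a variant of
    // the immediate itself rather than a wrapper around every scalar.
    Uninit,
}

/// Mirrors the new `get_const` shape: reject `Uninit` once, at the
/// boundary, so every later consumer can match on plain scalars.
fn get_const(op: Result<Immediate, String>) -> Option<Immediate> {
    match op {
        Ok(Immediate::Uninit) => {
            // Make sure nobody accidentally uses this value.
            None
        }
        Ok(imm) => Some(imm),
        Err(e) => {
            eprintln!("get_const failed: {e}");
            None
        }
    }
}

fn main() {
    assert_eq!(get_const(Ok(Immediate::Uninit)), None);
    assert_eq!(
        get_const(Ok(Immediate::Scalar(Scalar::Int(7)))),
        Some(Immediate::Scalar(Scalar::Int(7)))
    );
}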
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
index 87d7b664015..7508df92df1 100644
--- a/compiler/rustc_mir_transform/src/deref_separator.rs
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -28,8 +28,6 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
             let mut last_len = 0;
             let mut last_deref_idx = 0;

-            let mut prev_temp: Option<Local> = None;
-
             for (idx, elem) in place.projection[0..].iter().enumerate() {
                 if *elem == ProjectionElem::Deref {
                     last_deref_idx = idx;
@@ -39,14 +37,12 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
             for (idx, (p_ref, p_elem)) in place.iter_projections().enumerate() {
                 if !p_ref.projection.is_empty() && p_elem == ProjectionElem::Deref {
                     let ty = p_ref.ty(&self.local_decls, self.tcx).ty;
-                    let temp = self.patcher.new_local_with_info(
+                    let temp = self.patcher.new_internal_with_info(
                         ty,
                         self.local_decls[p_ref.local].source_info.span,
                         Some(Box::new(LocalInfo::DerefTemp)),
                     );

-                    self.patcher.add_statement(loc, StatementKind::StorageLive(temp));
-
                     // We are adding current p_ref's projections to our
                     // temp value, excluding projections we already covered.
                     let deref_place = Place::from(place_local)
@@ -66,22 +62,8 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
                             Place::from(temp).project_deeper(&place.projection[idx..], self.tcx);
                         *place = temp_place;
                     }
-
-                    // We are destroying the previous temp since it's no longer used.
-                    if let Some(prev_temp) = prev_temp {
-                        self.patcher.add_statement(loc, StatementKind::StorageDead(prev_temp));
-                    }
-
-                    prev_temp = Some(temp);
                 }
             }
-
-            // Since we won't be able to reach final temp, we destroy it outside the loop.
-            if let Some(prev_temp) = prev_temp {
-                let last_loc =
-                    Location { block: loc.block, statement_index: loc.statement_index + 1 };
-                self.patcher.add_statement(last_loc, StatementKind::StorageDead(prev_temp));
-            }
         }
     }
 }
@@ -100,6 +82,5 @@ pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
 impl<'tcx> MirPass<'tcx> for Derefer {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         deref_finder(tcx, body);
-        body.phase = MirPhase::Derefered;
     }
 }
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index 33572068f5c..da55510920e 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -150,7 +150,7 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
             def_id,
             body.local_decls.len(),
             relevant,
-            body.basic_blocks().len()
+            body.basic_blocks.len()
         );
         if relevant > MAX_LOCALS {
             warn!(
@@ -159,11 +159,11 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
             );
             return;
         }
-        if body.basic_blocks().len() > MAX_BLOCKS {
+        if body.basic_blocks.len() > MAX_BLOCKS {
             warn!(
                 "too many blocks in {:?} ({}, max is {}), not optimizing",
                 def_id,
-                body.basic_blocks().len(),
+                body.basic_blocks.len(),
                 MAX_BLOCKS
             );
             return;
diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
index dba42f7aff0..32e738bbcea 100644
--- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
+++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
@@ -104,8 +104,8 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
         let mut should_cleanup = false;

         // Also consider newly generated bbs in the same pass
-        for i in 0..body.basic_blocks().len() {
-            let bbs = body.basic_blocks();
+        for i in 0..body.basic_blocks.len() {
+            let bbs = &*body.basic_blocks;
             let parent = BasicBlock::from_usize(i);
             let Some(opt_data) = evaluate_candidate(tcx, body, parent) else {
                 continue
@@ -316,7 +316,7 @@ fn evaluate_candidate<'tcx>(
     body: &Body<'tcx>,
     parent: BasicBlock,
 ) -> Option<OptimizationData<'tcx>> {
-    let bbs = body.basic_blocks();
+    let bbs = &body.basic_blocks;
     let TerminatorKind::SwitchInt {
         targets,
         switch_ty: parent_ty,
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index 76522233689..294af2455d0 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -69,9 +69,7 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
             let (unique_ty, nonnull_ty, ptr_ty) =
                 build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did);

-            let ptr_local = self.patch.new_temp(ptr_ty, source_info.span);
-
-            self.patch.add_statement(location, StatementKind::StorageLive(ptr_local));
+            let ptr_local = self.patch.new_internal(ptr_ty, source_info.span);

             self.patch.add_assign(
                 location,
@@ -83,11 +81,6 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
             );

             place.local = ptr_local;
-
-            self.patch.add_statement(
-                Location { block: location.block, statement_index: location.statement_index + 1 },
-                StatementKind::StorageDead(ptr_local),
-            );
         }

         self.super_place(place, context, location);
@@ -114,27 +107,8 @@ impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
             let mut visitor =
                 ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch };

-            for (block, BasicBlockData { statements, terminator, .. }) in
-                body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut()
-            {
-                let mut index = 0;
-                for statement in statements {
-                    let location = Location { block, statement_index: index };
-                    visitor.visit_statement(statement, location);
-                    index += 1;
-                }
-
-                let location = Location { block, statement_index: index };
-                match terminator {
-                    // yielding into a box is handled when lowering generators
-                    Some(Terminator { kind: TerminatorKind::Yield { value, .. }, .. }) => {
-                        visitor.visit_operand(value, location);
-                    }
-                    Some(terminator) => {
-                        visitor.visit_terminator(terminator, location);
-                    }
-                    None => {}
-                }
+            for (block, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+                visitor.visit_basic_block_data(block, data);
             }

             visitor.patch.apply(body);
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 9c1fcbaa69d..65f4956d23a 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -21,10 +21,6 @@ use std::fmt;
 pub struct ElaborateDrops;

 impl<'tcx> MirPass<'tcx> for ElaborateDrops {
-    fn phase_change(&self) -> Option<MirPhase> {
-        Some(MirPhase::DropsLowered)
-    }
-
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);

@@ -89,13 +85,13 @@ fn find_dead_unwinds<'tcx>(
     debug!("find_dead_unwinds({:?})", body.span);
     // We only need to do this pass once, because unwind edges can only
     // reach cleanup blocks, which can't have unwind edges themselves.
-    let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
+    let mut dead_unwinds = BitSet::new_empty(body.basic_blocks.len());
     let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
         .into_engine(tcx, body)
         .pass_name("find_dead_unwinds")
         .iterate_to_fixpoint()
         .into_results_cursor(body);
-    for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+    for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
         let place = match bb_data.terminator().kind {
             TerminatorKind::Drop { ref place, unwind: Some(_), .. }
             | TerminatorKind::DropAndReplace { ref place, unwind: Some(_), .. } => {
@@ -303,7 +299,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     }

     fn collect_drop_flags(&mut self) {
-        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             let terminator = data.terminator();
             let place = match terminator.kind {
                 TerminatorKind::Drop { ref place, .. }
@@ -358,7 +354,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     }

     fn elaborate_drops(&mut self) {
-        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             let loc = Location { block: bb, statement_index: data.statements.len() };
             let terminator = data.terminator();
@@ -515,7 +511,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     }

     fn drop_flags_for_fn_rets(&mut self) {
-        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             if let TerminatorKind::Call {
                 destination, target: Some(tgt), cleanup: Some(_), ..
             } = data.terminator().kind
@@ -550,7 +546,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
         // drop flags by themselves, to avoid the drop flags being
         // clobbered before they are read.

-        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             debug!("drop_flags_for_locs({:?})", data);
             for i in 0..(data.statements.len() + 1) {
                 debug!("drop_flag_for_locs: stmt {}", i);
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index 7728fdaffb0..7522a50a8c6 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -65,7 +65,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {

     let mut tainted = false;

-    for block in body.basic_blocks() {
+    for block in body.basic_blocks.iter() {
         if block.is_cleanup {
             continue;
         }
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
index 91ecf387922..dbff4a6bd69 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -490,12 +490,12 @@ fn locals_live_across_suspend_points<'tcx>(
         .iterate_to_fixpoint()
         .into_results_cursor(body_ref);

-    let mut storage_liveness_map = IndexVec::from_elem(None, body.basic_blocks());
+    let mut storage_liveness_map = IndexVec::from_elem(None, &body.basic_blocks);
     let mut live_locals_at_suspension_points = Vec::new();
     let mut source_info_at_suspension_points = Vec::new();
     let mut live_locals_at_any_suspension_point = BitSet::new_empty(body.local_decls.len());

-    for (block, data) in body.basic_blocks().iter_enumerated() {
+    for (block, data) in body.basic_blocks.iter_enumerated() {
         if let TerminatorKind::Yield { .. } = data.terminator().kind {
             let loc = Location { block, statement_index: data.statements.len() };

@@ -704,7 +704,7 @@ impl<'mir, 'tcx> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx>
 impl StorageConflictVisitor<'_, '_, '_> {
     fn apply_state(&mut self, flow_state: &BitSet<Local>, loc: Location) {
         // Ignore unreachable blocks.
-        if self.body.basic_blocks()[loc.block].terminator().kind == TerminatorKind::Unreachable {
+        if self.body.basic_blocks[loc.block].terminator().kind == TerminatorKind::Unreachable {
             return;
         }

@@ -886,7 +886,7 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {

     let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, param_env };

-    for (block, block_data) in body.basic_blocks().iter_enumerated() {
+    for (block, block_data) in body.basic_blocks.iter_enumerated() {
         let (target, unwind, source_info) = match block_data.terminator() {
             Terminator { source_info, kind: TerminatorKind::Drop { place, target, unwind } } => {
                 if let Some(local) = place.as_local() {
@@ -991,7 +991,7 @@ fn insert_panic_block<'tcx>(
     body: &mut Body<'tcx>,
     message: AssertMessage<'tcx>,
 ) -> BasicBlock {
-    let assert_block = BasicBlock::new(body.basic_blocks().len());
+    let assert_block = BasicBlock::new(body.basic_blocks.len());
     let term = TerminatorKind::Assert {
         cond: Operand::Constant(Box::new(Constant {
             span: body.span,
@@ -1021,7 +1021,7 @@ fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ty::ParamEn
     }

     // If there's a return terminator the function may return.
-    for block in body.basic_blocks() {
+    for block in body.basic_blocks.iter() {
         if let TerminatorKind::Return = block.terminator().kind {
             return true;
         }
@@ -1038,7 +1038,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
     }

     // Unwinds can only start at certain terminators.
-    for block in body.basic_blocks() {
+    for block in body.basic_blocks.iter() {
         match block.terminator().kind {
             // These never unwind.
             TerminatorKind::Goto { .. }
@@ -1182,8 +1182,6 @@ fn create_cases<'tcx>(
     transform: &TransformVisitor<'tcx>,
     operation: Operation,
 ) -> Vec<(usize, BasicBlock)> {
-    let tcx = transform.tcx;
-
     let source_info = SourceInfo::outermost(body.span);

     transform
@@ -1216,85 +1214,13 @@ fn create_cases<'tcx>(
                 if operation == Operation::Resume {
                     // Move the resume argument to the destination place of the `Yield` terminator
                     let resume_arg = Local::new(2); // 0 = return, 1 = self
-
-                    // handle `box yield` properly
-                    let box_place = if let [projection @ .., ProjectionElem::Deref] =
-                        &**point.resume_arg.projection
-                    {
-                        let box_place =
-                            Place::from(point.resume_arg.local).project_deeper(projection, tcx);
-
-                        let box_ty = box_place.ty(&body.local_decls, tcx).ty;
-
-                        if box_ty.is_box() { Some((box_place, box_ty)) } else { None }
-                    } else {
-                        None
-                    };
-
-                    if let Some((box_place, box_ty)) = box_place {
-                        let unique_did = box_ty
-                            .ty_adt_def()
-                            .expect("expected Box to be an Adt")
-                            .non_enum_variant()
-                            .fields[0]
-                            .did;
-
-                        let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
-                            span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
-                        };
-
-                        let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
-
-                        let (unique_ty, nonnull_ty, ptr_ty) =
-                            crate::elaborate_box_derefs::build_ptr_tys(
-                                tcx,
-                                box_ty.boxed_ty(),
-                                unique_did,
-                                nonnull_did,
-                            );
-
-                        let ptr_local = body.local_decls.push(LocalDecl::new(ptr_ty, body.span));
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::StorageLive(ptr_local),
-                        });
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::Assign(Box::new((
-                                Place::from(ptr_local),
-                                Rvalue::Use(Operand::Copy(box_place.project_deeper(
-                                    &crate::elaborate_box_derefs::build_projection(
-                                        unique_ty, nonnull_ty, ptr_ty,
-                                    ),
-                                    tcx,
-                                ))),
-                            ))),
-                        });
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::Assign(Box::new((
-                                Place::from(ptr_local)
-                                    .project_deeper(&[ProjectionElem::Deref], tcx),
-                                Rvalue::Use(Operand::Move(resume_arg.into())),
-                            ))),
-                        });
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::StorageDead(ptr_local),
-                        });
-                    } else {
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::Assign(Box::new((
-                                point.resume_arg,
-                                Rvalue::Use(Operand::Move(resume_arg.into())),
-                            ))),
-                        });
-                    }
+                    statements.push(Statement {
+                        source_info,
+                        kind: StatementKind::Assign(Box::new((
+                            point.resume_arg,
+                            Rvalue::Use(Operand::Move(resume_arg.into())),
+                        ))),
+                    });
                 }

                 // Then jump to the real target
@@ -1314,10 +1240,6 @@ fn create_cases<'tcx>(
 }

 impl<'tcx> MirPass<'tcx> for StateTransform {
-    fn phase_change(&self) -> Option<MirPhase> {
-        Some(MirPhase::GeneratorsLowered)
-    }
-
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let Some(yield_ty) = body.yield_ty() else {
             // This only applies to generators
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 6704d3462f4..ba00f16308e 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -12,6 +12,7 @@ use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyC
 use rustc_session::config::OptLevel;
 use rustc_span::def_id::DefId;
 use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
+use rustc_target::abi::VariantIdx;
 use rustc_target::spec::abi::Abi;

 use super::simplify::{remove_dead_blocks, CfgSimplifier};
@@ -94,7 +95,7 @@ fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
         history: Vec::new(),
         changed: false,
     };
-    let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
+    let blocks = BasicBlock::new(0)..body.basic_blocks.next_index();
     this.process_blocks(body, blocks);
     this.changed
 }
@@ -216,9 +217,9 @@ impl<'tcx> Inliner<'tcx> {
             }
         }

-        let old_blocks = caller_body.basic_blocks().next_index();
+        let old_blocks = caller_body.basic_blocks.next_index();
         self.inline_call(caller_body, &callsite, callee_body);
-        let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
+        let new_blocks = old_blocks..caller_body.basic_blocks.next_index();

         Ok(new_blocks)
     }
@@ -408,124 +409,66 @@ impl<'tcx> Inliner<'tcx> {
         // Give a bonus functions with a small number of blocks,
         // We normally have two or three blocks for even
         // very small functions.
-        if callee_body.basic_blocks().len() <= 3 {
+        if callee_body.basic_blocks.len() <= 3 {
             threshold += threshold / 4;
         }
         debug!("    final inline threshold = {}", threshold);

         // FIXME: Give a bonus to functions with only a single caller
-        let mut first_block = true;
-        let mut cost = 0;
+        let diverges = matches!(
+            callee_body.basic_blocks[START_BLOCK].terminator().kind,
+            TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
+        );
+        if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
+            return Err("callee diverges unconditionally");
+        }
+
+        let mut checker = CostChecker {
+            tcx: self.tcx,
+            param_env: self.param_env,
+            instance: callsite.callee,
+            callee_body,
+            cost: 0,
+            validation: Ok(()),
+        };

-        // Traverse the MIR manually so we can account for the effects of
-        // inlining on the CFG.
+        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
         let mut work_list = vec![START_BLOCK];
-        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
+        let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
         while let Some(bb) = work_list.pop() {
             if !visited.insert(bb.index()) {
                 continue;
             }
-            let blk = &callee_body.basic_blocks()[bb];
-            for stmt in &blk.statements {
-                // Don't count StorageLive/StorageDead in the inlining cost.
-                match stmt.kind {
-                    StatementKind::StorageLive(_)
-                    | StatementKind::StorageDead(_)
-                    | StatementKind::Deinit(_)
-                    | StatementKind::Nop => {}
-                    _ => cost += INSTR_COST,
-                }
-            }
-            let term = blk.terminator();
-            let mut is_drop = false;
-            match term.kind {
-                TerminatorKind::Drop { ref place, target, unwind }
-                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
-                    is_drop = true;
-                    work_list.push(target);
-                    // If the place doesn't actually need dropping, treat it like
-                    // a regular goto.
-                    let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
-                    if ty.needs_drop(tcx, self.param_env) {
-                        cost += CALL_PENALTY;
-                        if let Some(unwind) = unwind {
-                            cost += LANDINGPAD_PENALTY;
-                            work_list.push(unwind);
-                        }
-                    } else {
-                        cost += INSTR_COST;
-                    }
-                }
+            let blk = &callee_body.basic_blocks[bb];
+            checker.visit_basic_block_data(bb, blk);

-                TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
-                    if first_block =>
-                {
-                    // If the function always diverges, don't inline
-                    // unless the cost is zero
-                    threshold = 0;
-                }
-
-                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
-                    if let ty::FnDef(def_id, _) =
-                        *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
-                    {
-                        // Don't give intrinsics the extra penalty for calls
-                        if tcx.is_intrinsic(def_id) {
-                            cost += INSTR_COST;
-                        } else {
-                            cost += CALL_PENALTY;
-                        }
-                    } else {
-                        cost += CALL_PENALTY;
-                    }
-                    if cleanup.is_some() {
-                        cost += LANDINGPAD_PENALTY;
-                    }
-                }
-                TerminatorKind::Assert { cleanup, .. } => {
-                    cost += CALL_PENALTY;
-
-                    if cleanup.is_some() {
-                        cost += LANDINGPAD_PENALTY;
-                    }
-                }
-                TerminatorKind::Resume => cost += RESUME_PENALTY,
-                TerminatorKind::InlineAsm { cleanup, .. } => {
-                    cost += INSTR_COST;
+            let term = blk.terminator();
+            if let TerminatorKind::Drop { ref place, target, unwind }
+            | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } = term.kind
+            {
+                work_list.push(target);

-                    if cleanup.is_some() {
-                        cost += LANDINGPAD_PENALTY;
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) && let Some(unwind) = unwind {
+                    work_list.push(unwind);
                 }
-                }
-                _ => cost += INSTR_COST,
-            }
-
-            if !is_drop {
-                for succ in term.successors() {
-                    work_list.push(succ);
-                }
+            } else {
+                work_list.extend(term.successors())
             }
-
-            first_block = false;
         }

         // Count up the cost of local variables and temps, if we know the size
         // use that, otherwise we use a moderately-large dummy cost.
-
-        let ptr_size = tcx.data_layout.pointer_size.bytes();
-
         for v in callee_body.vars_and_temps_iter() {
-            let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
-            // Cost of the var is the size in machine-words, if we know
-            // it.
-            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
-                cost += ((size + ptr_size - 1) / ptr_size) as usize;
-            } else {
-                cost += UNKNOWN_SIZE_COST;
-            }
+            checker.visit_local_decl(v, &callee_body.local_decls[v]);
         }

+        // Abort if type validation found anything fishy.
+        checker.validation?;
+
+        let cost = checker.cost;
         if let InlineAttr::Always = callee_attrs.inline {
             debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
             Ok(())
@@ -598,7 +541,7 @@ impl<'tcx> Inliner<'tcx> {
             args: &args,
             new_locals: Local::new(caller_body.local_decls.len())..,
             new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
-            new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
+            new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
             destination: dest,
             callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
             callsite,
@@ -616,7 +559,9 @@ impl<'tcx> Inliner<'tcx> {
         // If there are any locals without storage markers, give them storage only for the
         // duration of the call.
         for local in callee_body.vars_and_temps_iter() {
-            if integrator.always_live_locals.contains(local) {
+            if !callee_body.local_decls[local].internal
+                && integrator.always_live_locals.contains(local)
+            {
                 let new_local = integrator.map_local(local);
                 caller_body[callsite.block].statements.push(Statement {
                     source_info: callsite.source_info,
@@ -629,7 +574,9 @@ impl<'tcx> Inliner<'tcx> {
             // the slice once.
             let mut n = 0;
             for local in callee_body.vars_and_temps_iter().rev() {
-                if integrator.always_live_locals.contains(local) {
+                if !callee_body.local_decls[local].internal
+                    && integrator.always_live_locals.contains(local)
+                {
                     let new_local = integrator.map_local(local);
                     caller_body[block].statements.push(Statement {
                         source_info: callsite.source_info,
@@ -795,6 +742,193 @@ fn type_size_of<'tcx>(
     tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
 }

+/// Verify that the callee body is compatible with the caller.
+///
+/// This visitor mostly computes the inlining cost,
+/// but also needs to verify that types match because of normalization failure.
+struct CostChecker<'b, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    cost: usize,
+    callee_body: &'b Body<'tcx>,
+    instance: ty::Instance<'tcx>,
+    validation: Result<(), &'static str>,
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        // Don't count StorageLive/StorageDead in the inlining cost.
+        match statement.kind {
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Deinit(_)
+            | StatementKind::Nop => {}
+            _ => self.cost += INSTR_COST,
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        let tcx = self.tcx;
+        match terminator.kind {
+            TerminatorKind::Drop { ref place, unwind, .. }
+            | TerminatorKind::DropAndReplace { ref place, unwind, .. } => {
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = self.instance.subst_mir(tcx, &place.ty(self.callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) {
+                    self.cost += CALL_PENALTY;
+                    if unwind.is_some() {
+                        self.cost += LANDINGPAD_PENALTY;
+                    }
+                } else {
+                    self.cost += INSTR_COST;
+                }
+            }
+            TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
+                let fn_ty = self.instance.subst_mir(tcx, &f.literal.ty());
+                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+                    // Don't give intrinsics the extra penalty for calls
+                    INSTR_COST
+                } else {
+                    CALL_PENALTY
+                };
+                if cleanup.is_some() {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Assert { cleanup, .. } => {
+                self.cost += CALL_PENALTY;
+                if cleanup.is_some() {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Resume => self.cost += RESUME_PENALTY,
+            TerminatorKind::InlineAsm { cleanup, .. } => {
+                self.cost += INSTR_COST;
+                if cleanup.is_some() {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            _ => self.cost += INSTR_COST,
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    /// Count up the cost of local variables and temps, if we know the size
+    /// use that, otherwise we use a moderately-large dummy cost.
+    fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+        let tcx = self.tcx;
+        let ptr_size = tcx.data_layout.pointer_size.bytes();
+
+        let ty = self.instance.subst_mir(tcx, &local_decl.ty);
+        // Cost of the var is the size in machine-words, if we know
+        // it.
+        if let Some(size) = type_size_of(tcx, self.param_env, ty) {
+            self.cost += ((size + ptr_size - 1) / ptr_size) as usize;
+        } else {
+            self.cost += UNKNOWN_SIZE_COST;
+        }
+
+        self.super_local_decl(local, local_decl)
+    }
+
+    /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
+    /// to normalization failure.
+    fn visit_projection_elem(
+        &mut self,
+        local: Local,
+        proj_base: &[PlaceElem<'tcx>],
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        if let ProjectionElem::Field(f, ty) = elem {
+            let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+            let parent_ty = parent.ty(&self.callee_body.local_decls, self.tcx);
+            let check_equal = |this: &mut Self, f_ty| {
+                if !equal_up_to_regions(this.tcx, this.param_env, ty, f_ty) {
+                    trace!(?ty, ?f_ty);
+                    this.validation = Err("failed to normalize projection type");
+                    return;
+                }
+            };
+
+            let kind = match parent_ty.ty.kind() {
+                &ty::Opaque(def_id, substs) => {
+                    self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+                }
+                kind => kind,
+            };
+
+            match kind {
+                ty::Tuple(fields) => {
+                    let Some(f_ty) = fields.get(f.as_usize()) else {
+                        self.validation = Err("malformed MIR");
+                        return;
+                    };
+                    check_equal(self, *f_ty);
+                }
+                ty::Adt(adt_def, substs) => {
+                    let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
+                    let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
+                        self.validation = Err("malformed MIR");
+                        return;
+                    };
+                    check_equal(self, field.ty(self.tcx, substs));
+                }
+                ty::Closure(_, substs) => {
+                    let substs = substs.as_closure();
+                    let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+                        self.validation = Err("malformed MIR");
+                        return;
+                    };
+                    check_equal(self, f_ty);
+                }
+                &ty::Generator(def_id, substs, _) => {
+                    let f_ty = if let Some(var) = parent_ty.variant_index {
+                        let gen_body = if def_id == self.callee_body.source.def_id() {
+                            self.callee_body
+                        } else {
+                            self.tcx.optimized_mir(def_id)
+                        };
+
+                        let Some(layout) = gen_body.generator_layout() else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        let Some(&local) = layout.variant_fields[var].get(f) else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        let Some(&f_ty) = layout.field_tys.get(local) else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        f_ty
+                    } else {
+                        let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        f_ty
+                    };
+
+                    check_equal(self, f_ty);
+                }
+                _ => self.validation = Err("malformed MIR"),
+            }
+        }
+
+        self.super_projection_elem(local, proj_base, elem, context, location);
+    }
+}
+
 /**
  * Integrator.
  *
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 7810218fd67..b027f94925d 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -153,7 +153,7 @@ pub(crate) fn mir_inliner_callees<'tcx>(
         _ => tcx.instance_mir(instance),
     };
     let mut calls = FxIndexSet::default();
-    for bb_data in body.basic_blocks() {
+    for bb_data in body.basic_blocks.iter() {
         let terminator = bb_data.terminator();
         if let TerminatorKind::Call { func, .. } = &terminator.kind {
             let ty = func.ty(&body.local_decls, tcx);
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 56fb601d2b5..0b674b38f32 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -1,5 +1,6 @@
 #![allow(rustc::potential_query_instability)]
 #![feature(box_patterns)]
+#![feature(let_chains)]
 #![feature(let_else)]
 #![feature(map_try_insert)]
 #![feature(min_specialization)]
@@ -25,7 +26,9 @@ use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_hir::intravisit::{self, Visitor};
 use rustc_index::vec::IndexVec;
 use rustc_middle::mir::visit::Visitor as _;
-use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPass, MirPhase, Promoted};
+use rustc_middle::mir::{
+    traversal, AnalysisPhase, Body, ConstQualifs, MirPass, MirPhase, Promoted, RuntimePhase,
+};
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::{self, TyCtxt, TypeVisitable};

@@ -199,6 +202,8 @@ fn mir_const_qualif(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) ->
 }

 /// Make MIR ready for const evaluation. This is run on all MIR, not just on consts!
+/// FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
+/// We used to have this for pre-miri MIR based const eval.
 fn mir_const<'tcx>(
     tcx: TyCtxt<'tcx>,
     def: ty::WithOptConstParam<LocalDefId>,
@@ -234,7 +239,6 @@ fn mir_const<'tcx>(
             // What we need to do constant evaluation.
             &simplify::SimplifyCfg::new("initial"),
             &rustc_peek::SanityCheck, // Just a lint
-            &marker::PhaseChange(MirPhase::Const),
         ],
     );
     tcx.alloc_steal_mir(body)
@@ -340,7 +344,10 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -
         pm::run_passes(
             tcx,
             &mut body,
-            &[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimized)],
+            &[
+                &const_prop::ConstProp,
+                &marker::PhaseChange(MirPhase::Runtime(RuntimePhase::Optimized)),
+            ],
         );
     }
 }
@@ -380,38 +387,61 @@ fn mir_drops_elaborated_and_const_checked<'tcx>(
         body.tainted_by_errors = Some(error_reported);
     }

-    // IMPORTANT
-    pm::run_passes(tcx, &mut body, &[&remove_false_edges::RemoveFalseEdges]);
+    run_analysis_to_runtime_passes(tcx, &mut body);
+
+    tcx.alloc_steal_mir(body)
+}
+
+fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
+    let did = body.source.def_id();
+
+    debug!("analysis_mir_cleanup({:?})", did);
+    run_analysis_cleanup_passes(tcx, body);
+    assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));

     // Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
     if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, &body)) {
         pm::run_passes(
             tcx,
-            &mut body,
+            body,
             &[
-                &simplify::SimplifyCfg::new("remove-false-edges"),
                 &remove_uninit_drops::RemoveUninitDrops,
+                &simplify::SimplifyCfg::new("remove-false-edges"),
             ],
         );
         check_consts::post_drop_elaboration::check_live_drops(tcx, &body); // FIXME: make this a MIR lint
     }

-    run_post_borrowck_cleanup_passes(tcx, &mut body);
-    assert!(body.phase == MirPhase::Deaggregated);
-    tcx.alloc_steal_mir(body)
+    debug!("runtime_mir_lowering({:?})", did);
+    run_runtime_lowering_passes(tcx, body);
+    assert!(body.phase == MirPhase::Runtime(RuntimePhase::Initial));
+
+    debug!("runtime_mir_cleanup({:?})", did);
+    run_runtime_cleanup_passes(tcx, body);
+    assert!(body.phase == MirPhase::Runtime(RuntimePhase::PostCleanup));
 }

-/// After this series of passes, no lifetime analysis based on borrowing can be done.
-fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    debug!("post_borrowck_cleanup({:?})", body.source.def_id());
+// FIXME(JakobDegen): Can we make these lists of passes consts?

-    let post_borrowck_cleanup: &[&dyn MirPass<'tcx>] = &[
-        // Remove all things only needed by analysis
+/// After this series of passes, no lifetime analysis based on borrowing can be done.
+fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let passes: &[&dyn MirPass<'tcx>] = &[
+        &remove_false_edges::RemoveFalseEdges,
         &simplify_branches::SimplifyConstCondition::new("initial"),
         &remove_noop_landing_pads::RemoveNoopLandingPads,
         &cleanup_post_borrowck::CleanupNonCodegenStatements,
         &simplify::SimplifyCfg::new("early-opt"),
         &deref_separator::Derefer,
+        &marker::PhaseChange(MirPhase::Analysis(AnalysisPhase::PostCleanup)),
+    ];
+
+    pm::run_passes(tcx, body, passes);
+}
+
+/// Returns the sequence of passes that lowers analysis to runtime MIR.
+fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let passes: &[&dyn MirPass<'tcx>] = &[
         // These next passes must be executed together
         &add_call_guards::CriticalCallEdges,
         &elaborate_drops::ElaborateDrops,
@@ -425,16 +455,27 @@ fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tc
         // `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
         // but before optimizations begin.
         &elaborate_box_derefs::ElaborateBoxDerefs,
+        &generator::StateTransform,
         &add_retag::AddRetag,
-        &lower_intrinsics::LowerIntrinsics,
-        &simplify::SimplifyCfg::new("elaborate-drops"),
-        // `Deaggregator` is conceptually part of MIR building, some backends rely on it happening
-        // and it can help optimizations.
+        // Deaggregator is necessary for const prop. We may want to consider implementing
+        // CTFE support for aggregates.
         &deaggregator::Deaggregator,
         &Lint(const_prop_lint::ConstProp),
+        &marker::PhaseChange(MirPhase::Runtime(RuntimePhase::Initial)),
+    ];
+    pm::run_passes_no_validate(tcx, body, passes);
+}
+
+/// Returns the sequence of passes that do the initial cleanup of runtime MIR.
+fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let passes: &[&dyn MirPass<'tcx>] = &[ + &elaborate_box_derefs::ElaborateBoxDerefs, + &lower_intrinsics::LowerIntrinsics, + &simplify::SimplifyCfg::new("elaborate-drops"), + &marker::PhaseChange(MirPhase::Runtime(RuntimePhase::PostCleanup)), ]; - pm::run_passes(tcx, body, post_borrowck_cleanup); + pm::run_passes(tcx, body, passes); } fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { @@ -442,9 +483,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { WithMinOptLevel(1, x) } - // Lowering generator control-flow and variables has to happen before we do anything else - // to them. We run some optimizations before that, because they may be harder to do on the state - // machine than on MIR with async primitives. + // The main optimizations that we do on MIR. pm::run_passes( tcx, body, @@ -456,17 +495,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &uninhabited_enum_branching::UninhabitedEnumBranching, &o1(simplify::SimplifyCfg::new("after-uninhabited-enum-branching")), &inline::Inline, - &generator::StateTransform, - ], - ); - - assert!(body.phase == MirPhase::GeneratorsLowered); - - // The main optimizations that we do on MIR. - pm::run_passes( - tcx, - body, - &[ &remove_storage_markers::RemoveStorageMarkers, &remove_zsts::RemoveZsts, &const_goto::ConstGoto, @@ -498,7 +526,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &deduplicate_blocks::DeduplicateBlocks, // Some cleanup necessary at least for LLVM and potentially other codegen backends. &add_call_guards::CriticalCallEdges, - &marker::PhaseChange(MirPhase::Optimized), + &marker::PhaseChange(MirPhase::Runtime(RuntimePhase::Optimized)), // Dump the end result for testing and debugging purposes. 
&dump_mir::Marker("PreCodegen"), ], @@ -557,7 +585,7 @@ fn promoted_mir<'tcx>( if let Some(error_reported) = tainted_by_errors { body.tainted_by_errors = Some(error_reported); } - run_post_borrowck_cleanup_passes(tcx, body); + run_analysis_to_runtime_passes(tcx, body); } debug_assert!(!promoted.has_free_regions(), "Free regions in promoted MIR"); diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs index 22b6dead99c..3957cd92c4e 100644 --- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs +++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs @@ -15,7 +15,7 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // find basic blocks with no statement and a return terminator - let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks().len()); + let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks.len()); let def_id = body.source.def_id(); let bbs = body.basic_blocks_mut(); for idx in bbs.indices() { diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs index c0217a10541..a159e617178 100644 --- a/compiler/rustc_mir_transform/src/normalize_array_len.rs +++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs @@ -21,10 +21,10 @@ impl<'tcx> MirPass<'tcx> for NormalizeArrayLen { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // early returns for edge cases of highly unrolled functions - if body.basic_blocks().len() > MAX_NUM_BLOCKS { + if body.basic_blocks.len() > MAX_NUM_BLOCKS { return; } - if body.local_decls().len() > MAX_NUM_LOCALS { + if body.local_decls.len() > MAX_NUM_LOCALS { return; } normalize_array_len_calls(tcx, body) diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs index 42d732730ec..4291e81c78c 100644 --- a/compiler/rustc_mir_transform/src/nrvo.rs +++ b/compiler/rustc_mir_transform/src/nrvo.rs @@ -89,7 +89,7 @@ fn local_eligible_for_nrvo(body: &mut mir::Body<'_>) -> Option<Local> { } let mut copied_to_return_place = None; - for block in body.basic_blocks().indices() { + for block in body.basic_blocks.indices() { // Look for blocks with a `Return` terminator. if !matches!(body[block].terminator().kind, mir::TerminatorKind::Return) { continue; @@ -122,7 +122,7 @@ fn find_local_assigned_to_return_place( body: &mut mir::Body<'_>, ) -> Option<Local> { let mut block = start; - let mut seen = HybridBitSet::new_empty(body.basic_blocks().len()); + let mut seen = HybridBitSet::new_empty(body.basic_blocks.len()); // Iterate as long as `block` has exactly one predecessor that we have not yet visited. while seen.insert(block) { diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs index e27d4ab1688..67ea5cfdb3c 100644 --- a/compiler/rustc_mir_transform/src/pass_manager.rs +++ b/compiler/rustc_mir_transform/src/pass_manager.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; -use rustc_middle::mir::{self, Body, MirPhase}; +use rustc_middle::mir::{self, Body, MirPhase, RuntimePhase}; use rustc_middle::ty::TyCtxt; use rustc_session::Session; @@ -72,48 +72,62 @@ where } } +/// Run the sequence of passes without validating the MIR after each pass. The MIR is still +/// validated at the end. 
+pub fn run_passes_no_validate<'tcx>( + tcx: TyCtxt<'tcx>, + body: &mut Body<'tcx>, + passes: &[&dyn MirPass<'tcx>], +) { + run_passes_inner(tcx, body, passes, false); +} + pub fn run_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn MirPass<'tcx>]) { + run_passes_inner(tcx, body, passes, true); +} + +fn run_passes_inner<'tcx>( + tcx: TyCtxt<'tcx>, + body: &mut Body<'tcx>, + passes: &[&dyn MirPass<'tcx>], + validate_each: bool, +) { let start_phase = body.phase; let mut cnt = 0; - let validate = tcx.sess.opts.unstable_opts.validate_mir; + let validate = validate_each && tcx.sess.opts.unstable_opts.validate_mir; let overridden_passes = &tcx.sess.opts.unstable_opts.mir_enable_passes; trace!(?overridden_passes); - if validate { - validate_body(tcx, body, format!("start of phase transition from {:?}", start_phase)); - } - for pass in passes { let name = pass.name(); - if let Some((_, polarity)) = overridden_passes.iter().rev().find(|(s, _)| s == &*name) { - trace!( - pass = %name, - "{} as requested by flag", - if *polarity { "Running" } else { "Not running" }, - ); - if !polarity { - continue; - } - } else { - if !pass.is_enabled(&tcx.sess) { - continue; - } - } - let dump_enabled = pass.is_mir_dump_enabled(); + // Gather information about what we should be doing for this pass + let overridden = + overridden_passes.iter().rev().find(|(s, _)| s == &*name).map(|(_name, polarity)| { + trace!( + pass = %name, + "{} as requested by flag", + if *polarity { "Running" } else { "Not running" }, + ); + *polarity + }); + let is_enabled = overridden.unwrap_or_else(|| pass.is_enabled(&tcx.sess)); + let new_phase = pass.phase_change(); + let dump_enabled = (is_enabled && pass.is_mir_dump_enabled()) || new_phase.is_some(); + let validate = (validate && is_enabled) + || new_phase == Some(MirPhase::Runtime(RuntimePhase::Optimized)); if dump_enabled { dump_mir(tcx, body, start_phase, &name, cnt, false); } - - pass.run_pass(tcx, body); - + if is_enabled { + pass.run_pass(tcx, body); + } if dump_enabled { dump_mir(tcx, body, start_phase, &name, cnt, true); cnt += 1; } - if let Some(new_phase) = pass.phase_change() { if body.phase >= new_phase { panic!("Invalid MIR phase transition from {:?} to {:?}", body.phase, new_phase); @@ -121,15 +135,10 @@ pub fn run_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn body.phase = new_phase; } - if validate { - validate_body(tcx, body, format!("after pass {}", pass.name())); + validate_body(tcx, body, format!("after pass {}", name)); } } - - if validate || body.phase == MirPhase::Optimized { - validate_body(tcx, body, format!("end of phase transition to {:?}", body.phase)); - } } pub fn validate_body<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, when: String) { @@ -144,7 +153,7 @@ pub fn dump_mir<'tcx>( cnt: usize, is_after: bool, ) { - let phase_index = phase as u32; + let phase_index = phase.phase_index(); mir::dump_mir( tcx, diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs index 5c441c5b194..41a0bfac41a 100644 --- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs +++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs @@ -94,7 +94,7 @@ impl RemoveNoopLandingPads { let mut jumps_folded = 0; let mut landing_pads_removed = 0; - let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks().len()); + let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len()); // This is a post-order traversal, so that if A
post-dominates B // then A will be visited before B. diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs index 96b715402e7..78b6f714a9b 100644 --- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs +++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs @@ -35,7 +35,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops { .into_results_cursor(body); let mut to_remove = vec![]; - for (bb, block) in body.basic_blocks().iter_enumerated() { + for (bb, block) in body.basic_blocks.iter_enumerated() { let terminator = block.terminator(); let (TerminatorKind::Drop { place, .. } | TerminatorKind::DropAndReplace { place, .. }) = &terminator.kind diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs index 925eb10a1f7..190f9c1ac15 100644 --- a/compiler/rustc_mir_transform/src/separate_const_switch.rs +++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs @@ -62,7 +62,7 @@ impl<'tcx> MirPass<'tcx> for SeparateConstSwitch { pub fn separate_const_switch(body: &mut Body<'_>) -> usize { let mut new_blocks: SmallVec<[(BasicBlock, BasicBlock); 6]> = SmallVec::new(); let predecessors = body.basic_blocks.predecessors(); - 'block_iter: for (block_id, block) in body.basic_blocks().iter_enumerated() { + 'block_iter: for (block_id, block) in body.basic_blocks.iter_enumerated() { if let TerminatorKind::SwitchInt { discr: Operand::Copy(switch_place) | Operand::Move(switch_place), .. @@ -90,7 +90,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize { let mut predecessors_left = predecessors[block_id].len(); 'predec_iter: for predecessor_id in predecessors[block_id].iter().copied() { - let predecessor = &body.basic_blocks()[predecessor_id]; + let predecessor = &body.basic_blocks[predecessor_id]; // First we make sure the predecessor jumps // in a reasonable way diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index 3620e94bec7..2b21fbe0a61 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -17,8 +17,8 @@ use std::iter; use crate::util::expand_aggregate; use crate::{ - abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, marker, pass_manager as pm, - remove_noop_landing_pads, simplify, + abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator, marker, + pass_manager as pm, remove_noop_landing_pads, simplify, }; use rustc_middle::mir::patch::MirPatch; use rustc_mir_dataflow::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle}; @@ -92,11 +92,12 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<' &mut result, &[ &add_moves_for_packed_drops::AddMovesForPackedDrops, + &deref_separator::Derefer, &remove_noop_landing_pads::RemoveNoopLandingPads, &simplify::SimplifyCfg::new("make_shim"), &add_call_guards::CriticalCallEdges, &abort_unwinding_calls::AbortUnwindingCalls, - &marker::PhaseChange(MirPhase::Const), + &marker::PhaseChange(MirPhase::Runtime(RuntimePhase::Optimized)), ], ); diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs index 7a6ca917d0f..bed48db959a 100644 --- a/compiler/rustc_mir_transform/src/simplify.rs +++ b/compiler/rustc_mir_transform/src/simplify.rs @@ -74,7 +74,7 @@ pub struct CfgSimplifier<'a, 'tcx> { impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> { pub fn new(body: &'a mut Body<'tcx>) -> Self { - 
let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks()); + let mut pred_count = IndexVec::from_elem(0u32, &body.basic_blocks); // we can't use mir.predecessors() here because that counts // dead blocks, which we don't want. @@ -263,7 +263,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> { pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let reachable = traversal::reachable_as_bitset(body); - let num_blocks = body.basic_blocks().len(); + let num_blocks = body.basic_blocks.len(); if num_blocks == reachable.count() { return; } diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs index bbfaace7041..321d8c63b6e 100644 --- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs +++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs @@ -151,7 +151,7 @@ struct OptimizationFinder<'a, 'tcx> { impl<'tcx> OptimizationFinder<'_, 'tcx> { fn find_optimizations(&self) -> Vec<OptimizationInfo<'tcx>> { self.body - .basic_blocks() + .basic_blocks .iter_enumerated() .filter_map(|(bb_idx, bb)| { // find switch diff --git a/compiler/rustc_mir_transform/src/simplify_try.rs b/compiler/rustc_mir_transform/src/simplify_try.rs index d52f1261b23..baeb620ef24 100644 --- a/compiler/rustc_mir_transform/src/simplify_try.rs +++ b/compiler/rustc_mir_transform/src/simplify_try.rs @@ -596,7 +596,7 @@ struct SimplifyBranchSameOptimizationFinder<'a, 'tcx> { impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> { fn find(&self) -> Vec<SimplifyBranchSameOptimization> { self.body - .basic_blocks() + .basic_blocks .iter_enumerated() .filter_map(|(bb_idx, bb)| { let (discr_switched_on, targets_and_values) = match &bb.terminator().kind { @@ -632,7 +632,7 @@ impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> { let mut iter_bbs_reachable = targets_and_values .iter() - .map(|target_and_value| (target_and_value, &self.body.basic_blocks()[target_and_value.target])) + .map(|target_and_value| (target_and_value, &self.body.basic_blocks[target_and_value.target])) .filter(|(_, bb)| { // Reaching `unreachable` is UB so assume it doesn't happen.
bb.terminator().kind != TerminatorKind::Unreachable diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs index 30be64f5b2f..96ea15f1b80 100644 --- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs +++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs @@ -79,7 +79,7 @@ fn ensure_otherwise_unreachable<'tcx>( targets: &SwitchTargets, ) -> Option<BasicBlockData<'tcx>> { let otherwise = targets.otherwise(); - let bb = &body.basic_blocks()[otherwise]; + let bb = &body.basic_blocks[otherwise]; if bb.terminator().kind == TerminatorKind::Unreachable && bb.statements.iter().all(|s| matches!(&s.kind, StatementKind::StorageDead(_))) { @@ -102,10 +102,10 @@ impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { trace!("UninhabitedEnumBranching starting for {:?}", body.source); - for bb in body.basic_blocks().indices() { + for bb in body.basic_blocks.indices() { trace!("processing block {:?}", bb); - let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks()[bb], tcx, body) else { + let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body) else { continue; }; diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs index f916ca36217..95fda2eafe8 100644 --- a/compiler/rustc_mir_transform/src/unreachable_prop.rs +++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs @@ -12,9 +12,8 @@ pub struct UnreachablePropagation; impl MirPass<'_> for UnreachablePropagation { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - // Enable only under -Zmir-opt-level=4 as in some cases (check the deeply-nested-opt - // perf benchmark) LLVM may spend quite a lot of time optimizing the generated code. - sess.mir_opt_level() >= 4 + // Enable only under -Zmir-opt-level=2 as this can make programs less debuggable. + sess.mir_opt_level() >= 2 } fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { @@ -38,7 +37,19 @@ impl MirPass<'_> for UnreachablePropagation { } } + // We do want to keep some unreachable blocks, but make them empty. + for bb in unreachable_blocks { + if !tcx.consider_optimizing(|| { + format!("UnreachablePropagation {:?} ", body.source.def_id()) + }) { + break; + } + + body.basic_blocks_mut()[bb].statements.clear(); + } + let replaced = !replacements.is_empty(); + for (bb, terminator_kind) in replacements { if !tcx.consider_optimizing(|| { format!("UnreachablePropagation {:?} ", body.source.def_id()) }) { break; } @@ -57,42 +68,55 @@ impl MirPass<'_> for UnreachablePropagation { fn remove_successors<'tcx, F>( terminator_kind: &TerminatorKind<'tcx>, - predicate: F, + is_unreachable: F, ) -> Option<TerminatorKind<'tcx>> where F: Fn(BasicBlock) -> bool, { - let terminator = match *terminator_kind { - TerminatorKind::Goto { target } if predicate(target) => TerminatorKind::Unreachable, - TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => { + let terminator = match terminator_kind { + // This will unconditionally run into an unreachable and is therefore unreachable as well.
+ TerminatorKind::Goto { target } if is_unreachable(*target) => TerminatorKind::Unreachable, + TerminatorKind::SwitchInt { targets, discr, switch_ty } => { let otherwise = targets.otherwise(); - let original_targets_len = targets.iter().len() + 1; - let (mut values, mut targets): (Vec<_>, Vec<_>) = - targets.iter().filter(|(_, bb)| !predicate(*bb)).unzip(); + // If all targets are unreachable, we can be unreachable as well. + if targets.all_targets().iter().all(|bb| is_unreachable(*bb)) { + TerminatorKind::Unreachable + } else if is_unreachable(otherwise) { + // If there are multiple targets, don't delete unreachable branches (like an unreachable otherwise) + // unless the otherwise is itself unreachable: deleting a branch merges its case into the + // otherwise, and only an unreachable otherwise keeps the case unreachable. + // Losing that reachability information causes worse codegen. + // For example (see src/test/codegen/match-optimizes-away.rs) + // + // pub enum Two { A, B } + // pub fn identity(x: Two) -> Two { + // match x { + // Two::A => Two::A, + // Two::B => Two::B, + // } + // } + // + // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or LLVM to + // turn it into just `x` later. Without the unreachable, such a transformation would be illegal. + // If the otherwise branch is unreachable, we can delete all other unreachable targets, as they will + // still point to the unreachable otherwise and therefore not lose reachability information. + let reachable_iter = targets.iter().filter(|(_, bb)| !is_unreachable(*bb)); - if !predicate(otherwise) { - targets.push(otherwise); - } else { - values.pop(); - } + let new_targets = SwitchTargets::new(reachable_iter, otherwise); - let retained_targets_len = targets.len(); + // No unreachable branches were removed. + if new_targets.all_targets().len() == targets.all_targets().len() { + return None; + } - if targets.is_empty() { - TerminatorKind::Unreachable - } else if targets.len() == 1 { - TerminatorKind::Goto { target: targets[0] } - } else if original_targets_len != retained_targets_len { TerminatorKind::SwitchInt { discr: discr.clone(), - switch_ty, - targets: SwitchTargets::new( - values.iter().copied().zip(targets.iter().copied()), - *targets.last().unwrap(), - ), + switch_ty: *switch_ty, + targets: new_targets, } } else { + // If the otherwise branch is reachable, we don't want to delete any unreachable branches. return None; } }
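The lib.rs and pass_manager.rs hunks above replace the flat phase list with a two-level scheme: MirPhase::Analysis(AnalysisPhase) for borrow-check-facing MIR and MirPhase::Runtime(RuntimePhase) for codegen-facing MIR, with run_analysis_to_runtime_passes walking a body from Analysis(Initial) through Runtime(PostCleanup). Below is a minimal self-contained sketch of that shape, using only variant names that appear in the diff; the concrete phase_index() encoding is an illustrative assumption, not the exact rustc_middle implementation, shown because a plain `phase as u32` cast stops working once phases nest:

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum AnalysisPhase {
        Initial,
        PostCleanup,
    }

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum RuntimePhase {
        Initial,
        PostCleanup,
        Optimized,
    }

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum MirPhase {
        Built,
        Analysis(AnalysisPhase),
        Runtime(RuntimePhase),
    }

    impl MirPhase {
        // Flatten the two-level phase into a single index, e.g. for dump-file
        // numbering; this is the role of the `phase.phase_index()` call that
        // replaces `phase as u32` in dump_mir.
        fn phase_index(self) -> usize {
            match self {
                MirPhase::Built => 1,
                MirPhase::Analysis(p) => 2 + p as usize,
                MirPhase::Runtime(p) => 4 + p as usize,
            }
        }
    }

    fn main() {
        // The derived `Ord` follows declaration order; run_passes_inner relies on
        // such an ordering for its `body.phase >= new_phase` transition check.
        assert!(MirPhase::Analysis(AnalysisPhase::PostCleanup) < MirPhase::Runtime(RuntimePhase::Initial));
        assert_eq!(MirPhase::Runtime(RuntimePhase::Optimized).phase_index(), 6);
    }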
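In the rewritten run_passes_inner, per-pass gating collapses into one lookup: the last matching `-Zmir-enable-passes` override wins (hence the `.iter().rev().find(..)`), and a pass's own `is_enabled` is consulted only when no override matches. A toy model of just that precedence rule, with a plain `(String, bool)` slice standing in for the real session option type:

    // Hypothetical helper mirroring the override lookup in run_passes_inner.
    fn is_pass_enabled(
        name: &str,
        overridden_passes: &[(String, bool)], // (pass name, polarity) in flag order
        enabled_by_default: bool,
    ) -> bool {
        overridden_passes
            .iter()
            .rev() // later command-line overrides beat earlier ones
            .find(|(s, _)| s.as_str() == name)
            .map(|(_, polarity)| *polarity)
            .unwrap_or(enabled_by_default)
    }

    fn main() {
        // Something like -Zmir-enable-passes=-Inline,+Inline:
        let overrides = vec![("Inline".to_string(), false), ("Inline".to_string(), true)];
        // The later +Inline wins over the earlier -Inline.
        assert!(is_pass_enabled("Inline", &overrides, false));
        // Passes with no override fall back to their own is_enabled result.
        assert!(!is_pass_enabled("ConstGoto", &overrides, false));
    }

Note that in the diff a disabled pass can still trigger a dump or a phase change (dump_enabled and the phase-transition check do not depend solely on is_enabled); the sketch models only the enablement decision.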
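Most of the remaining hunks are one mechanical change: the accessor call body.basic_blocks() becomes the field access body.basic_blocks, and that field also carries cached analyses (the separate_const_switch.rs hunk calls body.basic_blocks.predecessors() on it directly). The sketch below shows one plausible shape for such a read-only wrapper; the caching details and every name other than basic_blocks and predecessors are assumptions for illustration, not rustc's actual definitions:

    use std::cell::OnceCell;
    use std::ops::Deref;

    // Stand-in for the per-block payload in real MIR.
    struct BasicBlockData {
        successors: Vec<usize>,
    }

    // Read-only wrapper over the block list. Mutable access would go through
    // dedicated methods that invalidate the cache (elided here).
    struct BasicBlocks {
        blocks: Vec<BasicBlockData>,
        predecessors: OnceCell<Vec<Vec<usize>>>,
    }

    impl BasicBlocks {
        fn new(blocks: Vec<BasicBlockData>) -> Self {
            BasicBlocks { blocks, predecessors: OnceCell::new() }
        }

        // Computed on first use and memoized, the kind of cached analysis the
        // wrapper makes possible.
        fn predecessors(&self) -> &Vec<Vec<usize>> {
            self.predecessors.get_or_init(|| {
                let mut preds = vec![Vec::new(); self.blocks.len()];
                for (bb, data) in self.blocks.iter().enumerate() {
                    for &succ in &data.successors {
                        preds[succ].push(bb);
                    }
                }
                preds
            })
        }
    }

    // Deref keeps old call sites working: len(), indexing, and iteration on
    // `body.basic_blocks` resolve to the underlying container.
    impl Deref for BasicBlocks {
        type Target = Vec<BasicBlockData>;
        fn deref(&self) -> &Self::Target {
            &self.blocks
        }
    }

    struct Body {
        basic_blocks: BasicBlocks,
    }

    fn main() {
        let body = Body {
            basic_blocks: BasicBlocks::new(vec![
                BasicBlockData { successors: vec![1] },
                BasicBlockData { successors: vec![] },
            ]),
        };
        assert_eq!(body.basic_blocks.len(), 2); // through Deref
        assert_eq!(body.basic_blocks.predecessors()[1], vec![0]);
    }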
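Finally, the new SwitchInt handling in unreachable_prop.rs can be restated as a small decision procedure: if every target is unreachable, the switch itself becomes Unreachable; if only the otherwise is unreachable, other unreachable arms may be pruned because their cases still fall into an unreachable otherwise; and if the otherwise is reachable, nothing is deleted, to preserve reachability information. A self-contained re-statement with plain usize block ids and a Vec standing in for SwitchTargets (an assumed simplification, not the MIR types):

    #[derive(Debug, PartialEq)]
    enum Term {
        Goto(usize),
        Switch { targets: Vec<(u128, usize)>, otherwise: usize },
        Unreachable,
    }

    fn remove_successors(term: &Term, is_unreachable: impl Fn(usize) -> bool) -> Option<Term> {
        match term {
            // Unconditionally jumping into an unreachable block is itself unreachable.
            Term::Goto(t) if is_unreachable(*t) => Some(Term::Unreachable),
            Term::Switch { targets, otherwise } => {
                if targets.iter().all(|&(_, bb)| is_unreachable(bb)) && is_unreachable(*otherwise) {
                    // Every outgoing edge is unreachable, so the switch is too.
                    return Some(Term::Unreachable);
                }
                if !is_unreachable(*otherwise) {
                    // Keep unreachable arms: deleting one would merge its case into a
                    // reachable otherwise and lose the unreachability information.
                    return None;
                }
                // The otherwise is unreachable, so pruning other unreachable arms only
                // redirects those cases into an equally unreachable otherwise.
                let kept: Vec<(u128, usize)> =
                    targets.iter().copied().filter(|&(_, bb)| !is_unreachable(bb)).collect();
                if kept.len() == targets.len() {
                    return None; // nothing pruned, report "no change"
                }
                Some(Term::Switch { targets: kept, otherwise: *otherwise })
            }
            _ => None,
        }
    }

    fn main() {
        let is_unreachable = |bb: usize| bb == 9;
        assert_eq!(remove_successors(&Term::Goto(9), is_unreachable), Some(Term::Unreachable));
        let switch = Term::Switch { targets: vec![(0, 1), (1, 9)], otherwise: 9 };
        // The arm jumping to block 9 is pruned; its case merges into the unreachable otherwise.
        assert_eq!(
            remove_successors(&switch, is_unreachable),
            Some(Term::Switch { targets: vec![(0, 1)], otherwise: 9 })
        );
    }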
