Diffstat (limited to 'compiler/rustc_mir_transform')
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop.rs             45
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop_lint.rs        10
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs            2
-rw-r--r--  compiler/rustc_mir_transform/src/deref_separator.rs        20
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_box_derefs.rs   32
-rw-r--r--  compiler/rustc_mir_transform/src/generator.rs              88
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs                322
-rw-r--r--  compiler/rustc_mir_transform/src/unreachable_prop.rs        2
8 files changed, 263 insertions, 258 deletions
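Note on the const_prop changes below: the recurring edit is the removal of the `ScalarMaybeUninit` wrapper from the rustc_const_eval interpreter API, so values read out of MIR are matched as `Scalar` directly. The following is a minimal standalone sketch of why that simplifies callers; the enum definitions here are simplified stand-ins, not the real rustc types:

// Simplified model of the refactor, not the real rustc definitions.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Scalar { Int(u128), Ptr(usize) }

// The old wrapper that every read had to peel off first.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ScalarMaybeUninit { Scalar(Scalar), Uninit }

// Before: callers pattern-match through the wrapper and handle Uninit.
fn try_to_int_old(v: ScalarMaybeUninit) -> Option<u128> {
    match v {
        ScalarMaybeUninit::Scalar(Scalar::Int(i)) => Some(i),
        _ => None,
    }
}

// After: uninitialized reads are rejected at read time, so downstream
// code matches on Scalar directly, as the diff does in const_prop.rs.
fn try_to_int_new(v: Scalar) -> Option<u128> {
    match v {
        Scalar::Int(i) => Some(i),
        Scalar::Ptr(_) => None,
    }
}

fn main() {
    assert_eq!(try_to_int_old(ScalarMaybeUninit::Scalar(Scalar::Int(7))), Some(7));
    assert_eq!(try_to_int_new(Scalar::Int(7)), Some(7));
}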
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 1c087b93b49..9f3a9d0b878 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -28,7 +28,7 @@ use crate::MirPass;
 use rustc_const_eval::interpret::{
     self, compile_time_machine, AllocId, ConstAllocation, ConstValue, CtfeValidationMode, Frame,
     ImmTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, PlaceTy,
-    Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
+    Pointer, Scalar, StackPopCleanup, StackPopUnwind,
 };
 
 /// The maximum number of bytes that we'll allocate space for a local or the return value.
@@ -440,7 +440,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
         // Try to read the local as an immediate so that if it is representable as a scalar, we can
         // handle it as such, but otherwise, just return the value as is.
-        Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+        Some(match self.ecx.read_immediate_raw(&op) {
             Ok(Ok(imm)) => imm.into(),
             _ => op,
         })
@@ -532,8 +532,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let left_ty = left.ty(self.local_decls, self.tcx);
         let left_size = self.ecx.layout_of(left_ty).ok()?.size;
         let right_size = r.layout.size;
-        let r_bits = r.to_scalar().ok();
-        let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+        let r_bits = r.to_scalar().to_bits(right_size).ok();
         if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
             return None;
         }
@@ -562,7 +561,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 // and use it to do const-prop here and everywhere else
                 // where it makes sense.
                 if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
-                    ScalarMaybeUninit::Scalar(scalar),
+                    scalar,
                 )) = *value
                 {
                     *operand = self.operand_from_scalar(
@@ -675,7 +674,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     return this.ecx.eval_rvalue_into_place(rvalue, place);
                 }
 
-                let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
+                let arg_value = const_arg.to_scalar().to_bits(const_arg.layout.size)?;
                 let dest = this.ecx.eval_place(place)?;
 
                 match op {
@@ -689,7 +688,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     BinOp::Mul if const_arg.layout.ty.is_integral() && arg_value == 0 => {
                         if let Rvalue::CheckedBinaryOp(_, _) = rvalue {
                             let val = Immediate::ScalarPair(
-                                const_arg.to_scalar()?.into(),
+                                const_arg.to_scalar().into(),
                                 Scalar::from_bool(false).into(),
                             );
                             this.ecx.write_immediate(val, &dest)
@@ -743,21 +742,18 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         }
 
         // FIXME> figure out what to do when read_immediate_raw fails
-        let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value, /*force*/ false));
+        let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value));
 
         if let Some(Ok(imm)) = imm {
             match *imm {
-                interpret::Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar)) => {
+                interpret::Immediate::Scalar(scalar) => {
                     *rval = Rvalue::Use(self.operand_from_scalar(
                         scalar,
                         value.layout.ty,
                         source_info.span,
                     ));
                 }
-                Immediate::ScalarPair(
-                    ScalarMaybeUninit::Scalar(_),
-                    ScalarMaybeUninit::Scalar(_),
-                ) => {
+                Immediate::ScalarPair(..) => {
                     // Found a value represented as a pair. For now only do const-prop if the type
                     // of `rvalue` is also a tuple with two scalars.
                     // FIXME: enable the general case stated above ^.
@@ -812,13 +808,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         }
 
         match **op {
-            interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
-                s.try_to_int().is_ok()
+            interpret::Operand::Immediate(Immediate::Scalar(s)) => s.try_to_int().is_ok(),
+            interpret::Operand::Immediate(Immediate::ScalarPair(l, r)) => {
+                l.try_to_int().is_ok() && r.try_to_int().is_ok()
             }
-            interpret::Operand::Immediate(Immediate::ScalarPair(
-                ScalarMaybeUninit::Scalar(l),
-                ScalarMaybeUninit::Scalar(r),
-            )) => l.try_to_int().is_ok() && r.try_to_int().is_ok(),
             _ => false,
         }
     }
@@ -1079,7 +1072,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::Assert { expected, ref mut cond, .. } => {
                 if let Some(ref value) = self.eval_operand(&cond) {
                     trace!("assertion on {:?} should be {:?}", value, expected);
-                    let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
+                    let expected = Scalar::from_bool(*expected);
                     let value_const = self.ecx.read_scalar(&value).unwrap();
                     if expected != value_const {
                         // Poison all places this operand references so that further code
@@ -1092,13 +1085,11 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
                             }
                         } else {
                             if self.should_const_prop(value) {
-                                if let ScalarMaybeUninit::Scalar(scalar) = value_const {
-                                    *cond = self.operand_from_scalar(
-                                        scalar,
-                                        self.tcx.types.bool,
-                                        source_info.span,
-                                    );
-                                }
+                                *cond = self.operand_from_scalar(
+                                    value_const,
+                                    self.tcx.types.bool,
+                                    source_info.span,
+                                );
                             }
                         }
                     }
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index c2ea55af48a..1bc65721ea6 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -7,8 +7,7 @@ use crate::const_prop::ConstPropMode;
 use crate::MirLint;
 use rustc_const_eval::const_eval::ConstEvalErr;
 use rustc_const_eval::interpret::{
-    self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar,
-    ScalarMaybeUninit, StackPopCleanup,
+    self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar, StackPopCleanup,
 };
 use rustc_hir::def::DefKind;
 use rustc_hir::HirId;
@@ -239,7 +238,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
         // Try to read the local as an immediate so that if it is representable as a scalar, we can
         // handle it as such, but otherwise, just return the value as is.
-        Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+        Some(match self.ecx.read_immediate_raw(&op) {
             Ok(Ok(imm)) => imm.into(),
             _ => op,
         })
@@ -401,8 +400,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let left_ty = left.ty(self.local_decls, self.tcx);
         let left_size = self.ecx.layout_of(left_ty).ok()?.size;
         let right_size = r.layout.size;
-        let r_bits = r.to_scalar().ok();
-        let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+        let r_bits = r.to_scalar().to_bits(right_size).ok();
         if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
             debug!("check_binary_op: reporting assert for {:?}", source_info);
             self.report_assert_as_lint(
@@ -625,7 +623,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::Assert { expected, ref msg, ref cond, .. } => {
                 if let Some(ref value) = self.eval_operand(&cond, source_info) {
                     trace!("assertion on {:?} should be {:?}", value, expected);
-                    let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
+                    let expected = Scalar::from_bool(*expected);
                     let value_const = self.ecx.read_scalar(&value).unwrap();
                     if expected != value_const {
                         enum DbgVal<T> {
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 2619626a567..e72d016d7ac 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -541,7 +541,7 @@ fn fn_sig_and_body<'tcx>(
     // to HIR for it.
     let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
     let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
-    (hir::map::fn_sig(hir_node), tcx.hir().body(fn_body_id))
+    (hir_node.fn_sig(), tcx.hir().body(fn_body_id))
 }
 
 fn get_body_span<'tcx>(
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
index 87d7b664015..8869f3f92af 100644
--- a/compiler/rustc_mir_transform/src/deref_separator.rs
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -28,8 +28,6 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
         let mut last_len = 0;
         let mut last_deref_idx = 0;
 
-        let mut prev_temp: Option<Local> = None;
-
         for (idx, elem) in place.projection[0..].iter().enumerate() {
             if *elem == ProjectionElem::Deref {
                 last_deref_idx = idx;
@@ -39,14 +37,12 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
         for (idx, (p_ref, p_elem)) in place.iter_projections().enumerate() {
             if !p_ref.projection.is_empty() && p_elem == ProjectionElem::Deref {
                 let ty = p_ref.ty(&self.local_decls, self.tcx).ty;
-                let temp = self.patcher.new_local_with_info(
+                let temp = self.patcher.new_internal_with_info(
                     ty,
                     self.local_decls[p_ref.local].source_info.span,
                     Some(Box::new(LocalInfo::DerefTemp)),
                 );
 
-                self.patcher.add_statement(loc, StatementKind::StorageLive(temp));
-
                 // We are adding current p_ref's projections to our
                 // temp value, excluding projections we already covered.
                 let deref_place = Place::from(place_local)
@@ -66,22 +62,8 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
                         Place::from(temp).project_deeper(&place.projection[idx..], self.tcx);
                     *place = temp_place;
                 }
-
-                // We are destroying the previous temp since it's no longer used.
-                if let Some(prev_temp) = prev_temp {
-                    self.patcher.add_statement(loc, StatementKind::StorageDead(prev_temp));
-                }
-
-                prev_temp = Some(temp);
             }
         }
-
-        // Since we won't be able to reach final temp, we destroy it outside the loop.
-        if let Some(prev_temp) = prev_temp {
-            let last_loc =
-                Location { block: loc.block, statement_index: loc.statement_index + 1 };
-            self.patcher.add_statement(last_loc, StatementKind::StorageDead(prev_temp));
-        }
     }
 }
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index 76522233689..294af2455d0 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -69,9 +69,7 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
             let (unique_ty, nonnull_ty, ptr_ty) =
                 build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did);
 
-            let ptr_local = self.patch.new_temp(ptr_ty, source_info.span);
-
-            self.patch.add_statement(location, StatementKind::StorageLive(ptr_local));
+            let ptr_local = self.patch.new_internal(ptr_ty, source_info.span);
 
             self.patch.add_assign(
                 location,
@@ -83,11 +81,6 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
             );
 
             place.local = ptr_local;
-
-            self.patch.add_statement(
-                Location { block: location.block, statement_index: location.statement_index + 1 },
-                StatementKind::StorageDead(ptr_local),
-            );
         }
 
         self.super_place(place, context, location);
@@ -114,27 +107,8 @@ impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
             let mut visitor =
                 ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch };
 
-            for (block, BasicBlockData { statements, terminator, .. }) in
-                body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut()
-            {
-                let mut index = 0;
-                for statement in statements {
-                    let location = Location { block, statement_index: index };
-                    visitor.visit_statement(statement, location);
-                    index += 1;
-                }
-
-                let location = Location { block, statement_index: index };
-                match terminator {
-                    // yielding into a box is handled when lowering generators
-                    Some(Terminator { kind: TerminatorKind::Yield { value, .. }, .. }) => {
-                        visitor.visit_operand(value, location);
-                    }
-                    Some(terminator) => {
-                        visitor.visit_terminator(terminator, location);
-                    }
-                    None => {}
-                }
+            for (block, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+                visitor.visit_basic_block_data(block, data);
             }
 
             visitor.patch.apply(body);
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
index 91ecf387922..5b0d9900c0f 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -1182,8 +1182,6 @@ fn create_cases<'tcx>(
     transform: &TransformVisitor<'tcx>,
     operation: Operation,
 ) -> Vec<(usize, BasicBlock)> {
-    let tcx = transform.tcx;
-
     let source_info = SourceInfo::outermost(body.span);
 
     transform
@@ -1216,85 +1214,13 @@
                 if operation == Operation::Resume {
                     // Move the resume argument to the destination place of the `Yield` terminator
                     let resume_arg = Local::new(2); // 0 = return, 1 = self
-
-                    // handle `box yield` properly
-                    let box_place = if let [projection @ .., ProjectionElem::Deref] =
-                        &**point.resume_arg.projection
-                    {
-                        let box_place =
-                            Place::from(point.resume_arg.local).project_deeper(projection, tcx);
-
-                        let box_ty = box_place.ty(&body.local_decls, tcx).ty;
-
-                        if box_ty.is_box() { Some((box_place, box_ty)) } else { None }
-                    } else {
-                        None
-                    };
-
-                    if let Some((box_place, box_ty)) = box_place {
-                        let unique_did = box_ty
-                            .ty_adt_def()
-                            .expect("expected Box to be an Adt")
-                            .non_enum_variant()
-                            .fields[0]
-                            .did;
-
-                        let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
-                            span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
-                        };
-
-                        let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
-
-                        let (unique_ty, nonnull_ty, ptr_ty) =
-                            crate::elaborate_box_derefs::build_ptr_tys(
-                                tcx,
-                                box_ty.boxed_ty(),
-                                unique_did,
-                                nonnull_did,
-                            );
-
-                        let ptr_local = body.local_decls.push(LocalDecl::new(ptr_ty, body.span));
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::StorageLive(ptr_local),
-                        });
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::Assign(Box::new((
-                                Place::from(ptr_local),
-                                Rvalue::Use(Operand::Copy(box_place.project_deeper(
-                                    &crate::elaborate_box_derefs::build_projection(
-                                        unique_ty, nonnull_ty, ptr_ty,
-                                    ),
-                                    tcx,
-                                ))),
-                            ))),
-                        });
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::Assign(Box::new((
-                                Place::from(ptr_local)
-                                    .project_deeper(&[ProjectionElem::Deref], tcx),
-                                Rvalue::Use(Operand::Move(resume_arg.into())),
-                            ))),
-                        });
-
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::StorageDead(ptr_local),
-                        });
-                    } else {
-                        statements.push(Statement {
-                            source_info,
-                            kind: StatementKind::Assign(Box::new((
-                                point.resume_arg,
-                                Rvalue::Use(Operand::Move(resume_arg.into())),
-                            ))),
-                        });
-                    }
+                    statements.push(Statement {
+                        source_info,
+                        kind: StatementKind::Assign(Box::new((
+                            point.resume_arg,
+                            Rvalue::Use(Operand::Move(resume_arg.into())),
+                        ))),
+                    });
                 }
 
                 // Then jump to the real target
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 6704d3462f4..d7d29840188 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -12,6 +12,7 @@ use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyC
 use rustc_session::config::OptLevel;
 use rustc_span::def_id::DefId;
 use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
+use rustc_target::abi::VariantIdx;
 use rustc_target::spec::abi::Abi;
 
 use super::simplify::{remove_dead_blocks, CfgSimplifier};
@@ -414,118 +415,60 @@ impl<'tcx> Inliner<'tcx> {
         debug!("    final inline threshold = {}", threshold);
 
         // FIXME: Give a bonus to functions with only a single caller
-        let mut first_block = true;
-        let mut cost = 0;
+        let diverges = matches!(
+            callee_body.basic_blocks()[START_BLOCK].terminator().kind,
+            TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
+        );
+        if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
+            return Err("callee diverges unconditionally");
+        }
+
+        let mut checker = CostChecker {
+            tcx: self.tcx,
+            param_env: self.param_env,
+            instance: callsite.callee,
+            callee_body,
+            cost: 0,
+            validation: Ok(()),
+        };
 
-        // Traverse the MIR manually so we can account for the effects of
-        // inlining on the CFG.
+        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
         let mut work_list = vec![START_BLOCK];
         let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
         while let Some(bb) = work_list.pop() {
             if !visited.insert(bb.index()) {
                 continue;
             }
+
             let blk = &callee_body.basic_blocks()[bb];
+            checker.visit_basic_block_data(bb, blk);
 
-            for stmt in &blk.statements {
-                // Don't count StorageLive/StorageDead in the inlining cost.
-                match stmt.kind {
-                    StatementKind::StorageLive(_)
-                    | StatementKind::StorageDead(_)
-                    | StatementKind::Deinit(_)
-                    | StatementKind::Nop => {}
-                    _ => cost += INSTR_COST,
-                }
-            }
             let term = blk.terminator();
-            let mut is_drop = false;
-            match term.kind {
-                TerminatorKind::Drop { ref place, target, unwind }
-                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
-                    is_drop = true;
-                    work_list.push(target);
-                    // If the place doesn't actually need dropping, treat it like
-                    // a regular goto.
-                    let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
-                    if ty.needs_drop(tcx, self.param_env) {
-                        cost += CALL_PENALTY;
-                        if let Some(unwind) = unwind {
-                            cost += LANDINGPAD_PENALTY;
-                            work_list.push(unwind);
-                        }
-                    } else {
-                        cost += INSTR_COST;
-                    }
-                }
-
-                TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
-                    if first_block =>
-                {
-                    // If the function always diverges, don't inline
-                    // unless the cost is zero
-                    threshold = 0;
-                }
-
-                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
-                    if let ty::FnDef(def_id, _) =
-                        *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
-                    {
-                        // Don't give intrinsics the extra penalty for calls
-                        if tcx.is_intrinsic(def_id) {
-                            cost += INSTR_COST;
-                        } else {
-                            cost += CALL_PENALTY;
-                        }
-                    } else {
-                        cost += CALL_PENALTY;
-                    }
-                    if cleanup.is_some() {
-                        cost += LANDINGPAD_PENALTY;
-                    }
-                }
-                TerminatorKind::Assert { cleanup, .. } => {
-                    cost += CALL_PENALTY;
-
-                    if cleanup.is_some() {
-                        cost += LANDINGPAD_PENALTY;
-                    }
-                }
-                TerminatorKind::Resume => cost += RESUME_PENALTY,
-                TerminatorKind::InlineAsm { cleanup, .. } => {
-                    cost += INSTR_COST;
+            if let TerminatorKind::Drop { ref place, target, unwind }
+            | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } = term.kind
+            {
+                work_list.push(target);
 
-                    if cleanup.is_some() {
-                        cost += LANDINGPAD_PENALTY;
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) && let Some(unwind) = unwind {
+                    work_list.push(unwind);
                 }
-                }
-                _ => cost += INSTR_COST,
-            }
-
-            if !is_drop {
-                for succ in term.successors() {
-                    work_list.push(succ);
-                }
+            } else {
+                work_list.extend(term.successors())
             }
-
-            first_block = false;
         }
 
         // Count up the cost of local variables and temps, if we know the size
         // use that, otherwise we use a moderately-large dummy cost.
-
-        let ptr_size = tcx.data_layout.pointer_size.bytes();
         for v in callee_body.vars_and_temps_iter() {
-            let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
-            // Cost of the var is the size in machine-words, if we know
-            // it.
-            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
-                cost += ((size + ptr_size - 1) / ptr_size) as usize;
-            } else {
-                cost += UNKNOWN_SIZE_COST;
-            }
+            checker.visit_local_decl(v, &callee_body.local_decls[v]);
         }
 
+        // Abort if type validation found anything fishy.
+        checker.validation?;
+
+        let cost = checker.cost;
         if let InlineAttr::Always = callee_attrs.inline {
             debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
             Ok(())
@@ -616,7 +559,9 @@ impl<'tcx> Inliner<'tcx> {
                 // If there are any locals without storage markers, give them storage only for the
                 // duration of the call.
                 for local in callee_body.vars_and_temps_iter() {
-                    if integrator.always_live_locals.contains(local) {
+                    if !callee_body.local_decls[local].internal
+                        && integrator.always_live_locals.contains(local)
+                    {
                         let new_local = integrator.map_local(local);
                         caller_body[callsite.block].statements.push(Statement {
                             source_info: callsite.source_info,
@@ -629,7 +574,9 @@ impl<'tcx> Inliner<'tcx> {
                     // the slice once.
                     let mut n = 0;
                     for local in callee_body.vars_and_temps_iter().rev() {
-                        if integrator.always_live_locals.contains(local) {
+                        if !callee_body.local_decls[local].internal
+                            && integrator.always_live_locals.contains(local)
+                        {
                             let new_local = integrator.map_local(local);
                             caller_body[block].statements.push(Statement {
                                 source_info: callsite.source_info,
@@ -795,6 +742,193 @@ fn type_size_of<'tcx>(
     tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
 }
 
+/// Verify that the callee body is compatible with the caller.
+///
+/// This visitor mostly computes the inlining cost,
+/// but also needs to verify that types match because of normalization failure.
+struct CostChecker<'b, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    cost: usize,
+    callee_body: &'b Body<'tcx>,
+    instance: ty::Instance<'tcx>,
+    validation: Result<(), &'static str>,
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        // Don't count StorageLive/StorageDead in the inlining cost.
+        match statement.kind {
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Deinit(_)
+            | StatementKind::Nop => {}
+            _ => self.cost += INSTR_COST,
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        let tcx = self.tcx;
+        match terminator.kind {
+            TerminatorKind::Drop { ref place, unwind, .. }
+            | TerminatorKind::DropAndReplace { ref place, unwind, .. } => {
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = self.instance.subst_mir(tcx, &place.ty(self.callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) {
+                    self.cost += CALL_PENALTY;
+                    if unwind.is_some() {
+                        self.cost += LANDINGPAD_PENALTY;
+                    }
+                } else {
+                    self.cost += INSTR_COST;
+                }
+            }
+            TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
+                let fn_ty = self.instance.subst_mir(tcx, &f.literal.ty());
+                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+                    // Don't give intrinsics the extra penalty for calls
+                    INSTR_COST
+                } else {
+                    CALL_PENALTY
+                };
+                if cleanup.is_some() {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Assert { cleanup, .. } => {
+                self.cost += CALL_PENALTY;
+                if cleanup.is_some() {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Resume => self.cost += RESUME_PENALTY,
+            TerminatorKind::InlineAsm { cleanup, .. } => {
+                self.cost += INSTR_COST;
+                if cleanup.is_some() {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            _ => self.cost += INSTR_COST,
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    /// Count up the cost of local variables and temps, if we know the size
+    /// use that, otherwise we use a moderately-large dummy cost.
+    fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+        let tcx = self.tcx;
+        let ptr_size = tcx.data_layout.pointer_size.bytes();
+
+        let ty = self.instance.subst_mir(tcx, &local_decl.ty);
+        // Cost of the var is the size in machine-words, if we know
+        // it.
+        if let Some(size) = type_size_of(tcx, self.param_env, ty) {
+            self.cost += ((size + ptr_size - 1) / ptr_size) as usize;
+        } else {
+            self.cost += UNKNOWN_SIZE_COST;
+        }
+
+        self.super_local_decl(local, local_decl)
+    }
+
+    /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
+    /// to normalization failure.
+    fn visit_projection_elem(
+        &mut self,
+        local: Local,
+        proj_base: &[PlaceElem<'tcx>],
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        if let ProjectionElem::Field(f, ty) = elem {
+            let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+            let parent_ty = parent.ty(&self.callee_body.local_decls, self.tcx);
+            let check_equal = |this: &mut Self, f_ty| {
+                if !equal_up_to_regions(this.tcx, this.param_env, ty, f_ty) {
+                    trace!(?ty, ?f_ty);
+                    this.validation = Err("failed to normalize projection type");
+                    return;
+                }
+            };
+
+            let kind = match parent_ty.ty.kind() {
+                &ty::Opaque(def_id, substs) => {
+                    self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+                }
+                kind => kind,
+            };
+
+            match kind {
+                ty::Tuple(fields) => {
+                    let Some(f_ty) = fields.get(f.as_usize()) else {
+                        self.validation = Err("malformed MIR");
+                        return;
+                    };
+                    check_equal(self, *f_ty);
+                }
+                ty::Adt(adt_def, substs) => {
+                    let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
+                    let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
+                        self.validation = Err("malformed MIR");
+                        return;
+                    };
+                    check_equal(self, field.ty(self.tcx, substs));
+                }
+                ty::Closure(_, substs) => {
+                    let substs = substs.as_closure();
+                    let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+                        self.validation = Err("malformed MIR");
+                        return;
+                    };
+                    check_equal(self, f_ty);
+                }
+                &ty::Generator(def_id, substs, _) => {
+                    let f_ty = if let Some(var) = parent_ty.variant_index {
+                        let gen_body = if def_id == self.callee_body.source.def_id() {
+                            self.callee_body
+                        } else {
+                            self.tcx.optimized_mir(def_id)
+                        };
+
+                        let Some(layout) = gen_body.generator_layout() else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        let Some(&local) = layout.variant_fields[var].get(f) else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        let Some(&f_ty) = layout.field_tys.get(local) else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        f_ty
+                    } else {
+                        let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+                            self.validation = Err("malformed MIR");
+                            return;
+                        };
+
+                        f_ty
+                    };
+
+                    check_equal(self, f_ty);
+                }
+                _ => self.validation = Err("malformed MIR"),
+            }
+        }
+
+        self.super_projection_elem(local, proj_base, elem, context, location);
+    }
+}
+
 /**
  * Integrator.
  *
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index 56d7799a125..95fda2eafe8 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -84,7 +84,7 @@ where
             TerminatorKind::Unreachable
         } else if is_unreachable(otherwise) {
             // If there are multiple targets, don't delete unreachable branches (like an unreachable otherwise)
-            // unless otherwise is unrachable, in which case deleting a normal branch causes it to be merged with
+            // unless otherwise is unreachable, in which case deleting a normal branch causes it to be merged with
             // the otherwise, keeping its unreachable.
             // This looses information about reachability causing worse codegen.
             // For example (see src/test/codegen/match-optimizes-away.rs)
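For readers following the inline.rs restructuring above: the open-coded per-statement and per-terminator cost accounting moves into a visitor that accumulates cost as it walks the callee body. Below is a minimal standalone model of that pattern; the block/statement/terminator types are toy stand-ins, and the penalty constants mirror the pass only in spirit, none of this is the real rustc API:

// Toy model of the CostChecker pattern: a visitor walks a body and
// accumulates an inlining cost, centralizing the cost rules in one place.
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;

enum Statement { StorageLive, StorageDead, Nop, Assign }
enum Terminator { Goto, Call { has_cleanup: bool }, Assert { has_cleanup: bool } }

struct Block { statements: Vec<Statement>, terminator: Terminator }

struct CostChecker { cost: usize }

impl CostChecker {
    fn visit_statement(&mut self, s: &Statement) {
        // Storage markers are free, like in the real pass.
        match s {
            Statement::StorageLive | Statement::StorageDead | Statement::Nop => {}
            _ => self.cost += INSTR_COST,
        }
    }

    fn visit_terminator(&mut self, t: &Terminator) {
        match t {
            Terminator::Goto => self.cost += INSTR_COST,
            Terminator::Call { has_cleanup } | Terminator::Assert { has_cleanup } => {
                self.cost += CALL_PENALTY;
                if *has_cleanup {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
        }
    }

    fn visit_block(&mut self, b: &Block) {
        for s in &b.statements {
            self.visit_statement(s);
        }
        self.visit_terminator(&b.terminator);
    }
}

fn main() {
    let block = Block {
        statements: vec![Statement::StorageLive, Statement::Assign, Statement::StorageDead],
        terminator: Terminator::Call { has_cleanup: true },
    };
    let mut checker = CostChecker { cost: 0 };
    checker.visit_block(&block);
    assert_eq!(checker.cost, INSTR_COST + CALL_PENALTY + LANDINGPAD_PENALTY);
}

A side effect of this design, visible in the diff, is that validation (the projection-type checks) rides along on the same walk, so the caller simply inspects checker.validation and checker.cost afterwards.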

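The cost walk in inline.rs also keeps its reachability-only traversal: only blocks reachable from the entry block are costed, and a drop terminator only enqueues its unwind edge when the dropped type actually needs dropping. A sketch of that worklist pattern, again with toy types (the real pass uses a BitSet over basic-block indices rather than a HashSet):

use std::collections::HashSet;

struct Block { successors: Vec<usize> }

// Collect every block reachable from block 0 (the START_BLOCK analogue).
fn reachable_blocks(blocks: &[Block]) -> HashSet<usize> {
    let mut visited = HashSet::new();
    let mut work_list = vec![0];
    while let Some(bb) = work_list.pop() {
        if !visited.insert(bb) {
            continue; // already seen, don't cost it twice
        }
        work_list.extend(blocks[bb].successors.iter().copied());
    }
    visited
}

fn main() {
    // Block 2 is unreachable and is never visited (or costed).
    let blocks = vec![
        Block { successors: vec![1] },
        Block { successors: vec![] },
        Block { successors: vec![1] },
    ];
    let reachable = reachable_blocks(&blocks);
    assert!(reachable.contains(&0) && reachable.contains(&1) && !reachable.contains(&2));
}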