| author | bors &lt;bors@rust-lang.org&gt; | 2024-06-26 14:22:31 +0000 |
|---|---|---|
| committer | bors &lt;bors@rust-lang.org&gt; | 2024-06-26 14:22:31 +0000 |
| commit | d7c59370cea68cd17006ec3440a43254fd0eda7d | |
| tree | c2cffb44abcc4ce7edb6a14acc3bb540d9ec8a3f | compiler/rustc_mir_transform/src |
| parent | 4bdf8d2d5877f20b54c1506a607ad8c4744cc387 | |
| parent | dd545e148c0c1680be1555efcb02ece0a16ee3ef | |
Auto merge of #126844 - scottmcm:more-ptr-cast-gvn, r=saethlin
Remove more `PtrToPtr` casts in GVN

This addresses two things I noticed in MIR:

1. `NonNull::<T>::eq` does `(a as *mut T) == (b as *mut T)`, but it could just compare the `*const T`s, so this removes `PtrToPtr` casts that are on both sides of a pointer comparison, so long as they're not fat-to-thin casts.
2. `NonNull::<T>::addr` does `transmute::<_, usize>(p as *const ())`, but so long as `T: Thin` that cast doesn't do anything, and thus we can directly transmute the `*const T` instead.

A sketch of the source patterns involved follows below.

r? mir-opt
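For context, here is a minimal, self-contained sketch of the library patterns this targets. It is illustrative only, not code from this commit; the `demo_*` names are invented stand-ins for the `NonNull` methods mentioned above:

```rust
use std::ptr::NonNull;

// `NonNull::eq` compares `self.as_ptr() == other.as_ptr()`; in MIR both
// sides are `PtrToPtr` casts from `*const T` to `*mut T`, which the new
// GVN rule removes so the original pointers are compared directly.
fn demo_eq<T>(a: NonNull<T>, b: NonNull<T>) -> bool {
    a == b
}

// `NonNull::addr` lowers to a `PtrToPtr` cast to `*const ()` followed by a
// transmute to `usize`; for `T: Thin` (as here, since `T: Sized`) the cast
// changes nothing, so GVN can now transmute the original `*const T` directly.
fn demo_addr<T>(p: *const T) -> usize {
    unsafe { std::mem::transmute::<*const (), usize>(p as *const ()) }
}

fn main() {
    let x = 42u32;
    let nn = NonNull::from(&x);
    assert!(demo_eq(nn, nn));
    assert_eq!(demo_addr(&x as *const u32), &x as *const u32 as usize);
}
```

In optimized MIR, both patterns previously kept the intermediate `PtrToPtr` cast; with this change GVN can eliminate it.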
Diffstat (limited to 'compiler/rustc_mir_transform/src')
| -rw-r--r-- | compiler/rustc_mir_transform/src/cost_checker.rs | 29 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/gvn.rs | 114 |
2 files changed, 107 insertions, 36 deletions
diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs
index 32c0d27f635..7e401b5482f 100644
--- a/compiler/rustc_mir_transform/src/cost_checker.rs
+++ b/compiler/rustc_mir_transform/src/cost_checker.rs
@@ -60,7 +60,15 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
     fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, _location: Location) {
         match rvalue {
-            Rvalue::NullaryOp(NullOp::UbChecks, ..) if !self.tcx.sess.ub_checks() => {
+            Rvalue::NullaryOp(NullOp::UbChecks, ..)
+                if !self
+                    .tcx
+                    .sess
+                    .opts
+                    .unstable_opts
+                    .inline_mir_preserve_debug
+                    .unwrap_or(self.tcx.sess.ub_checks()) =>
+            {
                 // If this is in optimized MIR it's because it's used later,
                 // so if we don't need UB checks this session, give a bonus
                 // here to offset the cost of the call later.
@@ -111,12 +119,19 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
                 }
             }
             TerminatorKind::Assert { unwind, msg, .. } => {
-                self.penalty +=
-                    if msg.is_optional_overflow_check() && !self.tcx.sess.overflow_checks() {
-                        INSTR_COST
-                    } else {
-                        CALL_PENALTY
-                    };
+                self.penalty += if msg.is_optional_overflow_check()
+                    && !self
+                        .tcx
+                        .sess
+                        .opts
+                        .unstable_opts
+                        .inline_mir_preserve_debug
+                        .unwrap_or(self.tcx.sess.overflow_checks())
+                {
+                    INSTR_COST
+                } else {
+                    CALL_PENALTY
+                };
                 if let UnwindAction::Cleanup(_) = unwind {
                     self.penalty += LANDINGPAD_PENALTY;
                 }
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index bfdefd5a7d6..936a7e2d9de 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -823,18 +823,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 return self.simplify_cast(kind, value, to, location);
             }
             Rvalue::BinaryOp(op, box (ref mut lhs, ref mut rhs)) => {
-                let ty = lhs.ty(self.local_decls, self.tcx);
-                let lhs = self.simplify_operand(lhs, location);
-                let rhs = self.simplify_operand(rhs, location);
-                // Only short-circuit options after we called `simplify_operand`
-                // on both operands for side effect.
-                let lhs = lhs?;
-                let rhs = rhs?;
-
-                if let Some(value) = self.simplify_binary(op, ty, lhs, rhs) {
-                    return Some(value);
-                }
-                Value::BinaryOp(op, lhs, rhs)
+                return self.simplify_binary(op, lhs, rhs, location);
             }
             Rvalue::UnaryOp(op, ref mut arg_op) => {
                 return self.simplify_unary(op, arg_op, location);
@@ -987,23 +976,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             // `*const [T]` -> `*const T` which remove metadata.
             // We run on potentially-generic MIR, though, so unlike codegen
             // we can't always know exactly what the metadata are.
-            // Thankfully, equality on `ptr_metadata_ty_or_tail` gives us
-            // what we need: `Ok(meta_ty)` if the metadata is known, or
-            // `Err(tail_ty)` if not. Matching metadata is ok, but if
-            // that's not known, then matching tail types is also ok,
-            // allowing things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`.
-            // FIXME: Would it be worth trying to normalize, rather than
-            // passing the identity closure? Or are the types in the
-            // Cast realistically about as normalized as we can get anyway?
+            // To allow things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`,
+            // it's fine to get a projection as the type.
             Value::Cast { kind: CastKind::PtrToPtr, value: inner, from, to }
-                if from
-                    .builtin_deref(true)
-                    .unwrap()
-                    .ptr_metadata_ty_or_tail(self.tcx, |t| t)
-                    == to
-                        .builtin_deref(true)
-                        .unwrap()
-                        .ptr_metadata_ty_or_tail(self.tcx, |t| t) =>
+                if self.pointers_have_same_metadata(*from, *to) =>
             {
                 arg_index = *inner;
                 was_updated = true;
@@ -1070,6 +1046,52 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     fn simplify_binary(
         &mut self,
         op: BinOp,
+        lhs_operand: &mut Operand<'tcx>,
+        rhs_operand: &mut Operand<'tcx>,
+        location: Location,
+    ) -> Option<VnIndex> {
+        let lhs = self.simplify_operand(lhs_operand, location);
+        let rhs = self.simplify_operand(rhs_operand, location);
+        // Only short-circuit options after we called `simplify_operand`
+        // on both operands for side effect.
+        let mut lhs = lhs?;
+        let mut rhs = rhs?;
+
+        let lhs_ty = lhs_operand.ty(self.local_decls, self.tcx);
+
+        // If we're comparing pointers, remove `PtrToPtr` casts if the from
+        // types of both casts and the metadata all match.
+        if let BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge = op
+            && lhs_ty.is_any_ptr()
+            && let Value::Cast {
+                kind: CastKind::PtrToPtr, value: lhs_value, from: lhs_from, ..
+            } = self.get(lhs)
+            && let Value::Cast {
+                kind: CastKind::PtrToPtr, value: rhs_value, from: rhs_from, ..
+            } = self.get(rhs)
+            && lhs_from == rhs_from
+            && self.pointers_have_same_metadata(*lhs_from, lhs_ty)
+        {
+            lhs = *lhs_value;
+            rhs = *rhs_value;
+            if let Some(op) = self.try_as_operand(lhs, location) {
+                *lhs_operand = op;
+            }
+            if let Some(op) = self.try_as_operand(rhs, location) {
+                *rhs_operand = op;
+            }
+        }
+
+        if let Some(value) = self.simplify_binary_inner(op, lhs_ty, lhs, rhs) {
+            return Some(value);
+        }
+        let value = Value::BinaryOp(op, lhs, rhs);
+        Some(self.insert(value))
+    }
+
+    fn simplify_binary_inner(
+        &mut self,
+        op: BinOp,
         lhs_ty: Ty<'tcx>,
         lhs: VnIndex,
         rhs: VnIndex,
@@ -1228,6 +1250,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
         }
 
+        // PtrToPtr-then-PtrToPtr can skip the intermediate step
         if let PtrToPtr = kind
             && let Value::Cast { kind: inner_kind, value: inner_value, from: inner_from, to: _ } =
                 *self.get(value)
             && let PtrToPtr = inner_kind
         {
             from = inner_from;
             value = inner_value;
-            *kind = PtrToPtr;
+            was_updated = true;
+            if inner_from == to {
+                return Some(inner_value);
+            }
+        }
+
+        // PtrToPtr-then-Transmute can just transmute the original, so long as the
+        // PtrToPtr didn't change metadata (and thus the size of the pointer)
+        if let Transmute = kind
+            && let Value::Cast {
+                kind: PtrToPtr,
+                value: inner_value,
+                from: inner_from,
+                to: inner_to,
+            } = *self.get(value)
+            && self.pointers_have_same_metadata(inner_from, inner_to)
+        {
+            from = inner_from;
+            value = inner_value;
             was_updated = true;
             if inner_from == to {
                 return Some(inner_value);
             }
@@ -1289,6 +1330,21 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         // Fallback: a symbolic `Len`.
         Some(self.insert(Value::Len(inner)))
     }
+
+    fn pointers_have_same_metadata(&self, left_ptr_ty: Ty<'tcx>, right_ptr_ty: Ty<'tcx>) -> bool {
+        let left_meta_ty = left_ptr_ty.pointee_metadata_ty_or_projection(self.tcx);
+        let right_meta_ty = right_ptr_ty.pointee_metadata_ty_or_projection(self.tcx);
+        if left_meta_ty == right_meta_ty {
+            true
+        } else if let Ok(left) =
+            self.tcx.try_normalize_erasing_regions(self.param_env, left_meta_ty)
+            && let Ok(right) = self.tcx.try_normalize_erasing_regions(self.param_env, right_meta_ty)
+        {
+            left == right
+        } else {
+            false
+        }
+    }
 }
 
 fn op_to_prop_const<'tcx>(
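As a companion to the diff above, here is a small hedged demonstration (an assumed example, not part of the commit) of the equivalences the new rules rely on: chains of thin `PtrToPtr` casts preserve the address, while fat-to-thin casts drop metadata and are therefore excluded:

```rust
fn main() {
    let x = 0x1234u32;
    let p = &x as *const u32;

    // PtrToPtr-then-PtrToPtr: the intermediate `*const u8` step can be
    // skipped, since casts between thin pointers never change the address.
    let chained = p as *const u8 as *const u16;
    let direct = p as *const u16;
    assert_eq!(chained, direct);

    // The exclusion: a fat-to-thin cast like `*const [u32]` -> `*const u32`
    // discards the slice length metadata, so removing it would change what
    // information the value carries, not just its representation.
    let fat: *const [u32] = std::slice::from_ref(&x);
    let thin = fat as *const u32;
    assert_eq!(thin, p);
}
```

This distinction is what the new `pointers_have_same_metadata` helper checks: it accepts metadata types that are equal either syntactically or after `try_normalize_erasing_regions`, so generic MIR like `*mut (?A, ?T)` <-> `*mut (?B, ?T)` still qualifies.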
