about summary refs log tree commit diff
path: root/compiler/rustc_const_eval/src
diff options
context:
space:
mode:
author    Ralf Jung <post@ralfj.de>  2023-09-09 10:35:26 +0200
committer Ralf Jung <post@ralfj.de>  2023-09-09 15:36:44 +0200
commit    897a65804d7891c2d4518d6c6061e7baedaa745b (patch)
tree      3be1e158f1364a446e13470b73f637212f1dc127  /compiler/rustc_const_eval/src
parent    cd71a37f320c379df47ff64abd934f3a2da94c26 (diff)
downloadrust-897a65804d7891c2d4518d6c6061e7baedaa745b.tar.gz
rust-897a65804d7891c2d4518d6c6061e7baedaa745b.zip
interpret: change ABI-compat test to be type-based, so the test is consistent across targets
Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs  175
1 file changed, 125 insertions(+), 50 deletions(-)
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index eb4673c0edc..eeeec97936b 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -6,12 +6,16 @@ use rustc_middle::{
     mir,
     ty::{
         self,
-        layout::{FnAbiOf, LayoutOf, TyAndLayout},
+        layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout},
         Instance, Ty,
     },
 };
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
-use rustc_target::abi::{self, FieldIdx};
+use rustc_span::sym;
+use rustc_target::abi::FieldIdx;
+use rustc_target::abi::{
+    call::{ArgAbi, FnAbi, PassMode},
+    Integer,
+};
 use rustc_target::spec::abi::Abi;
 
 use super::{
@@ -255,6 +259,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     /// Find the wrapped inner type of a transparent wrapper.
     /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
+    ///
+    /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
     fn unfold_transparent(&self, layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx> {
         match layout.ty.kind() {
             ty::Adt(adt_def, _) if adt_def.repr().transparent() => {
@@ -278,6 +284,37 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
+    /// Unwrap types that are guaranteed a null-pointer-optimization
+    fn unfold_npo(&self, ty: Ty<'tcx>) -> InterpResult<'tcx, Ty<'tcx>> {
+        // Check if this is `Option` wrapping some type.
+        let inner_ty = match ty.kind() {
+            ty::Adt(def, args) if self.tcx.is_diagnostic_item(sym::Option, def.did()) => {
+                args[0].as_type().unwrap()
+            }
+            _ => {
+                // Not an `Option`.
+                return Ok(ty);
+            }
+        };
+        // Check if the inner type is one of the NPO-guaranteed ones.
+        Ok(match inner_ty.kind() {
+            ty::Ref(..) => {
+                // Option<&T> behaves like &T
+                inner_ty
+            }
+            ty::Adt(def, _)
+                if self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed) =>
+            {
+                // For non-null-guaranteed structs, unwrap newtypes.
+                self.unfold_transparent(self.layout_of(inner_ty)?).ty
+            }
+            _ => {
+                // Everything else we do not unfold.
+                ty
+            }
+        })
+    }
+
     /// Check if these two layouts look like they are fn-ABI-compatible.
     /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
     /// that only checking the `PassMode` is insufficient.)
@@ -285,63 +322,86 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         caller_layout: TyAndLayout<'tcx>,
         callee_layout: TyAndLayout<'tcx>,
-    ) -> bool {
+    ) -> InterpResult<'tcx, bool> {
+        // Fast path: equal types are definitely compatible.
         if caller_layout.ty == callee_layout.ty {
-            // Fast path: equal types are definitely compatible.
-            return true;
+            return Ok(true);
         }
-
-        match caller_layout.abi {
-            // For Scalar/Vector/ScalarPair ABI, we directly compare them.
-            // NOTE: this is *not* a stable guarantee! It just reflects a property of our current
-            // ABIs. It's also fragile; the same pair of types might be considered ABI-compatible
-            // when used directly by-value but not considered compatible as a struct field or array
-            // element.
-            abi::Abi::Scalar(..) | abi::Abi::ScalarPair(..) | abi::Abi::Vector { .. } => {
-                caller_layout.abi.eq_up_to_validity(&callee_layout.abi)
-            }
-            _ => {
-                // Everything else is compatible only if they newtype-wrap the same type, or if they are both 1-ZST.
-                // (The latter part is needed to ensure e.g. that `struct Zst` is compatible with `struct Wrap((), Zst)`.)
-                // This is conservative, but also means that our check isn't quite so heavily dependent on the `PassMode`,
-                // which means having ABI-compatibility on one target is much more likely to imply compatibility for other targets.
-                if caller_layout.is_1zst() || callee_layout.is_1zst() {
-                    // If either is a 1-ZST, both must be.
-                    caller_layout.is_1zst() && callee_layout.is_1zst()
-                } else {
-                    // Neither is a 1-ZST, so we can check what they are wrapping.
-                    self.unfold_transparent(caller_layout).ty
-                        == self.unfold_transparent(callee_layout).ty
+        // 1-ZST are compatible with all 1-ZST (and with nothing else).
+        if caller_layout.is_1zst() || callee_layout.is_1zst() {
+            return Ok(caller_layout.is_1zst() && callee_layout.is_1zst());
+        }
+        // Unfold newtypes and NPO optimizations.
+        let caller_ty = self.unfold_npo(self.unfold_transparent(caller_layout).ty)?;
+        let callee_ty = self.unfold_npo(self.unfold_transparent(callee_layout).ty)?;
+        // Now see if these inner types are compatible.
+
+        // Compatible pointer types.
+        let pointee_ty = |ty: Ty<'tcx>| {
+            // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
+            Some(match ty.kind() {
+                ty::Ref(_, ty, _) => *ty,
+                ty::RawPtr(mt) => mt.ty,
+                // We should only accept `Box` with the default allocator.
+                // It's hard to test for that though so we accept every 1-ZST allocator.
+                ty::Adt(def, args)
+                    if def.is_box()
+                        && self.layout_of(args[1].expect_ty()).is_ok_and(|l| l.is_1zst()) =>
+                {
+                    args[0].expect_ty()
                 }
-            }
+                _ => return None,
+            })
+        };
+        if let (Some(left), Some(right)) = (pointee_ty(caller_ty), pointee_ty(callee_ty)) {
+            // This is okay if they have the same metadata type.
+            let meta_ty = |ty: Ty<'tcx>| {
+                let (meta, only_if_sized) = ty.ptr_metadata_ty(*self.tcx, |ty| ty);
+                assert!(
+                    !only_if_sized,
+                    "there should be no more 'maybe has that metadata' types during interpretation"
+                );
+                meta
+            };
+            return Ok(meta_ty(left) == meta_ty(right));
         }
+
+        // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
+        let int_ty = |ty: Ty<'tcx>| {
+            Some(match ty.kind() {
+                ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
+                ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
+                _ => return None,
+            })
+        };
+        if let (Some(left), Some(right)) = (int_ty(caller_ty), int_ty(callee_ty)) {
+            // This is okay if they are the same integer type.
+            return Ok(left == right);
+        }
+
+        // Fall back to exact equality.
+        // FIXME: We are missing the rules for "repr(C) wrapping compatible types".
+        Ok(caller_ty == callee_ty)
     }
 
     fn check_argument_compat(
         &self,
         caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
         callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
-    ) -> bool {
-        // Ideally `PassMode` would capture everything there is about argument passing, but that is
-        // not the case: in `FnAbi::llvm_type`, also parts of the layout and type information are
-        // used. So we need to check that *both* sufficiently agree to ensures the arguments are
-        // compatible.
-        // For instance, `layout_compat` is needed to reject `i32` vs `f32`, which is not reflected
-        // in `PassMode`. `mode_compat` is needed to reject `u8` vs `bool`, which have the same
-        // `abi::Primitive` but different `arg_ext`.
-        if self.layout_compat(caller_abi.layout, callee_abi.layout)
-            && caller_abi.mode.eq_abi(&callee_abi.mode)
-        {
-            // Something went very wrong if our checks don't imply layout ABI compatibility.
-            assert!(caller_abi.layout.eq_abi(&callee_abi.layout));
-            return true;
+    ) -> InterpResult<'tcx, bool> {
+        // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
+        // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
+        if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
+            // Ensure that our checks imply actual ABI compatibility for this concrete call.
+            assert!(caller_abi.eq_abi(&callee_abi));
+            return Ok(true);
         } else {
             trace!(
                 "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
                 caller_abi,
                 callee_abi
             );
-            return false;
+            return Ok(false);
         }
     }
 
@@ -360,6 +420,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         'tcx: 'x,
         'tcx: 'y,
     {
+        assert_eq!(callee_ty, callee_abi.layout.ty);
         if matches!(callee_abi.mode, PassMode::Ignore) {
             // This one is skipped. Still must be made live though!
             if !already_live {
@@ -371,8 +432,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let Some((caller_arg, caller_abi)) = caller_args.next() else {
             throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
         };
+        assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
+        // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
+        // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
+        // right type to print to the user.
+
         // Check compatibility
-        if !self.check_argument_compat(caller_abi, callee_abi) {
+        if !self.check_argument_compat(caller_abi, callee_abi)? {
             let callee_ty = format!("{}", callee_ty);
             let caller_ty = format!("{}", caller_arg.layout().ty);
             throw_ub_custom!(
@@ -583,7 +649,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     // taking into account the `spread_arg`. If we could write
                     // this is a single iterator (that handles `spread_arg`), then
                     // `pass_argument` would be the loop body. It takes care to
-                    // not advance `caller_iter` for ZSTs.
+                    // not advance `caller_iter` for ignored arguments.
                     let mut callee_args_abis = callee_fn_abi.args.iter();
                     for local in body.args_iter() {
                         // Construct the destination place for this argument. At this point all
@@ -645,7 +711,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         throw_ub_custom!(fluent::const_eval_too_many_caller_args);
                     }
                     // Don't forget to check the return type!
-                    if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
+                    if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
                         let callee_ty = format!("{}", callee_fn_abi.ret.layout.ty);
                         let caller_ty = format!("{}", caller_fn_abi.ret.layout.ty);
                         throw_ub_custom!(
@@ -674,7 +740,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     Ok(()) => Ok(()),
                 }
             }
-            // cannot use the shim here, because that will only result in infinite recursion
+            // `InstanceDef::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
+            // codegen'd / interpreted as virtual calls through the vtable.
             ty::InstanceDef::Virtual(def_id, idx) => {
                 let mut args = args.to_vec();
                 // We have to implement all "object safe receivers". So we have to go search for a
@@ -798,18 +865,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
 
                 // Adjust receiver argument. Layout can be any (thin) ptr.
+                let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
                 args[0] = FnArg::Copy(
                     ImmTy::from_immediate(
                         Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
-                        self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
+                        self.layout_of(receiver_ty)?,
                     )
                     .into(),
                 );
                 trace!("Patched receiver operand to {:#?}", args[0]);
+                // Need to also adjust the type in the ABI. Strangely, the layout there is actually
+                // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
+                // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
+                // type, so we just patch this up locally.
+                let mut caller_fn_abi = caller_fn_abi.clone();
+                caller_fn_abi.args[0].layout.ty = receiver_ty;
+
                 // recurse with concrete function
                 self.eval_fn_call(
                     FnVal::Instance(fn_inst),
-                    (caller_abi, caller_fn_abi),
+                    (caller_abi, &caller_fn_abi),
                     &args,
                     with_caller_location,
                     destination,