Diffstat (limited to 'compiler/rustc_ty_utils/src/abi.rs'):

 compiler/rustc_ty_utils/src/abi.rs | 138 +++------------------
 1 file changed, 8 insertions(+), 130 deletions(-)
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 09b9ecb3486..48149a08de8 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -1,7 +1,7 @@
 use std::iter;
 
-use rustc_abi::Primitive::{Float, Pointer};
-use rustc_abi::{Abi, AddressSpace, PointerKind, Scalar, Size};
+use rustc_abi::Primitive::Pointer;
+use rustc_abi::{Abi, PointerKind, Scalar, Size};
 use rustc_hir as hir;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::bug;
@@ -13,8 +13,7 @@ use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
 use rustc_session::config::OptLevel;
 use rustc_span::def_id::DefId;
 use rustc_target::abi::call::{
-    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
-    RiscvInterruptKind,
+    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, RiscvInterruptKind,
 };
 use rustc_target::spec::abi::Abi as SpecAbi;
 use tracing::debug;
@@ -678,6 +677,8 @@ fn fn_abi_adjust_for_abi<'tcx>(
     let tcx = cx.tcx();
 
     if abi == SpecAbi::Rust || abi == SpecAbi::RustCall || abi == SpecAbi::RustIntrinsic {
+        fn_abi.adjust_for_rust_abi(cx, abi);
+
         // Look up the deduced parameter attributes for this function, if we have its def ID and
         // we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
         // as appropriate.
@@ -688,125 +689,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
                 &[]
             };
 
-        let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, arg_idx: Option<usize>| {
+        for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
             if arg.is_ignore() {
-                return;
-            }
-
-            // Avoid returning floats in x87 registers on x86 as loading and storing from x87
-            // registers will quiet signalling NaNs. Also avoid using SSE registers since they
-            // are not always available (depending on target features).
-            if tcx.sess.target.arch == "x86"
-                && arg_idx.is_none()
-                // Intrinsics themselves are not actual "real" functions, so there's no need to
-                // change their ABIs.
-                && abi != SpecAbi::RustIntrinsic
-            {
-                let has_float = match arg.layout.abi {
-                    Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
-                    Abi::ScalarPair(s1, s2) => {
-                        matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
-                    }
-                    _ => false, // anyway not passed via registers on x86
-                };
-                if has_float {
-                    if arg.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
-                        // Same size or smaller than pointer, return in a register.
-                        arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
-                    } else {
-                        // Larger than a pointer, return indirectly.
-                        arg.make_indirect();
-                    }
-                    return;
-                }
-            }
-
-            if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
-                // Return values larger than 2 registers using a return area
-                // pointer. LLVM and Cranelift disagree about how to return
-                // values that don't fit in the registers designated for return
-                // values. LLVM will force the entire return value to be passed
-                // by return area pointer, while Cranelift will look at each IR-level
-                // return value independently and decide to pass it in a
-                // register or not, which would result in the return value
-                // being passed partially in registers and partially through a
-                // return area pointer.
-                //
-                // While Cranelift may need to be fixed, as the LLVM behavior is
-                // generally more correct with respect to the surface language,
-                // forcing this behavior in rustc itself makes it easier for
-                // other backends to conform to the Rust ABI; for the C ABI,
-                // rustc already handles this behavior anyway.
-                //
-                // In addition, LLVM's decision to pass the return value in
-                // registers or using a return area pointer depends on how
-                // exactly the return type is lowered to an LLVM IR type. For
-                // example `Option<u128>` can be lowered as `{ i128, i128 }`
-                // in which case the x86_64 backend would use a return area
-                // pointer, or it could be passed as `{ i32, i128 }` in which
-                // case the x86_64 backend would pass it in registers by taking
-                // advantage of an LLVM ABI extension that allows using 3
-                // registers for the x86_64 sysv call conv rather than the
-                // officially specified 2 registers.
-                //
-                // FIXME: Technically we should look at the number of available
-                // return registers rather than guessing that there are 2
-                // registers for return values. In practice only a couple of
-                // architectures have fewer than 2 return registers, none of
-                // which are supported by Cranelift.
-                //
-                // NOTE: This adjustment is only necessary for the Rust ABI, as
-                // for other ABIs the calling convention implementations in
-                // rustc_target already ensure that any return value which doesn't
-                // fit in the available return registers is passed in
-                // the right way for the current target.
-                arg.make_indirect();
-                return;
-            }
-
-            match arg.layout.abi {
-                Abi::Aggregate { .. } => {}
-
-                // This is a fun case! The gist of what this is doing is
-                // that we want callers and callees to always agree on the
-                // ABI of how they pass SIMD arguments. If we were to *not*
-                // make these arguments indirect then they'd be immediates
-                // in LLVM, which means that they'd use whatever the
-                // appropriate ABI is for the callee and the caller. That
-                // means, for example, if the caller doesn't have AVX
-                // enabled but the callee does, then passing an AVX argument
-                // across this boundary would cause corrupt data to show up.
-                //
-                // This problem is fixed by unconditionally passing SIMD
-                // arguments through memory between callers and callees
-                // which should get them all to agree on ABI regardless of
-                // target feature sets. Some more information about this
-                // issue can be found in #44367.
-                //
-                // Note that the intrinsic ABI is exempt here as
-                // that's how we connect up to LLVM and it's unstable
-                // anyway; we control all calls to it in libstd.
-                Abi::Vector { .. }
-                    if abi != SpecAbi::RustIntrinsic && tcx.sess.target.simd_types_indirect =>
-                {
-                    arg.make_indirect();
-                    return;
-                }
-
-                _ => return,
-            }
-            // Compute `Aggregate` ABI.
-
-            let is_indirect_not_on_stack =
-                matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
-            assert!(is_indirect_not_on_stack, "{:?}", arg);
-
-            let size = arg.layout.size;
-            if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
-                // We want to pass small aggregates as immediates, but using
-                // an LLVM aggregate type for this leads to bad optimizations,
-                // so we pick an appropriately sized integer type instead.
-                arg.cast_to(Reg { kind: RegKind::Integer, size });
+                continue;
             }
 
             // If we deduced that this parameter was read-only, add that to the attribute list now.
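
The x87 comment removed in this hunk describes a concrete decision rule. As a minimal
sketch, not taken from the patch (the names `ReturnSlot` and `x86_float_return` are
hypothetical), the rule for 32-bit x86 looks like this:

    #[derive(Debug, PartialEq)]
    enum ReturnSlot {
        IntegerRegister { size_bytes: u64 },
        Indirect,
    }

    // Mirrors the removed rule: on 32-bit x86 (4-byte data pointers), a
    // float-containing return at or below pointer size goes in an integer
    // register (never x87, which would quiet signalling NaNs); anything
    // larger is returned indirectly.
    fn x86_float_return(size_bytes: u64) -> ReturnSlot {
        const POINTER_BYTES: u64 = 4;
        if size_bytes <= POINTER_BYTES {
            ReturnSlot::IntegerRegister { size_bytes }
        } else {
            ReturnSlot::Indirect
        }
    }

    fn main() {
        assert_eq!(x86_float_return(4), ReturnSlot::IntegerRegister { size_bytes: 4 }); // f32
        assert_eq!(x86_float_return(8), ReturnSlot::Indirect); // f64
    }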
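
The two-register rule for return values can likewise be checked against concrete sizes.
A sketch assuming a 64-bit target and current layouts; `needs_ret_area_ptr` is a
hypothetical helper, not a rustc function:

    fn needs_ret_area_ptr(ret_size: u64, pointer_size: u64) -> bool {
        ret_size > pointer_size * 2
    }

    fn main() {
        let ptr = std::mem::size_of::<usize>() as u64; // 8 on x86_64
        // `Option<u128>` is 32 bytes on x86_64: larger than two registers, so
        // the rule forces it through a return area pointer on all backends.
        assert!(needs_ret_area_ptr(std::mem::size_of::<Option<u128>>() as u64, ptr));
        // A 16-byte value still fits the two designated return registers.
        assert!(!needs_ret_area_ptr(std::mem::size_of::<u128>() as u64, ptr));
    }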
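
The SIMD comment removed above describes a real caller/callee mismatch (#44367). The
following sketch, not from the patch, uses real std intrinsics to show the boundary in
question: `callee` is compiled with AVX while its caller is not, so without indirect
passing the two sides could disagree on how `v` travels (rustc warns about exactly this
kind of call today):

    #[cfg(target_arch = "x86_64")]
    fn main() {
        use std::arch::x86_64::{__m256, _mm256_setzero_ps};

        // Built with AVX: expects `v` in a YMM register if passed directly.
        #[target_feature(enable = "avx")]
        unsafe fn callee(v: __m256) -> __m256 {
            v
        }

        if is_x86_feature_detected!("avx") {
            // SAFETY: AVX availability was just checked at runtime.
            let v = unsafe { _mm256_setzero_ps() };
            let _ = unsafe { callee(v) };
        }
    }

    #[cfg(not(target_arch = "x86_64"))]
    fn main() {}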
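
Finally, the removed `Aggregate` branch casts small aggregates to a pointer-sized
integer rather than passing an LLVM aggregate type. A tiny sketch of the size check,
with `Pair` as a hypothetical example type:

    #[repr(C)]
    struct Pair {
        a: u16,
        b: u16,
    }

    fn main() {
        // 4 bytes: fits in one pointer-sized register even on 32-bit targets,
        // so the removed fixup would cast it to a single integer register
        // (`Reg { kind: RegKind::Integer, .. }`) instead of an LLVM aggregate.
        assert!(std::mem::size_of::<Pair>() <= std::mem::size_of::<usize>());
    }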
@@ -814,9 +699,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
             // The `readonly` parameter only applies to pointers, so we can only do this if the
             // argument was passed indirectly. (If the argument is passed directly, it's an SSA
             // value, so it's implicitly immutable.)
-            if let (Some(arg_idx), &mut PassMode::Indirect { ref mut attrs, .. }) =
-                (arg_idx, &mut arg.mode)
-            {
+            if let &mut PassMode::Indirect { ref mut attrs, .. } = &mut arg.mode {
                 // The `deduced_param_attrs` list could be empty if this is a type of function
                 // we can't deduce any parameters for, so make sure the argument index is in
                 // bounds.
@@ -827,11 +710,6 @@ fn fn_abi_adjust_for_abi<'tcx>(
                     }
                 }
             }
-        };
-
-        fixup(&mut fn_abi.ret, None);
-        for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
-            fixup(arg, Some(arg_idx));
         }
     } else {
         fn_abi
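
The loop body kept by the last two hunks tags indirectly passed arguments with deduced
attributes. As a hedged illustration of the kind of parameter that analysis can mark
`readonly` (the type and function here are hypothetical, not from rustc):

    // `Big` is passed by value but lowered as a pointer (it exceeds the
    // register budget), and `sum` never writes through it, so the deduced
    // parameter attributes can add `readonly` for LLVM.
    struct Big([u64; 16]);

    fn sum(data: Big) -> u64 {
        data.0.iter().sum()
    }

    fn main() {
        let big = Big([1; 16]);
        assert_eq!(sum(big), 16);
    }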