about summary refs log tree commit diff
path: root/compiler
diff options
context:
space:
mode:
author: bors <bors@rust-lang.org> 2025-06-08 20:17:28 +0000
committer: bors <bors@rust-lang.org> 2025-06-08 20:17:28 +0000
commit: 6ccd4476036edfce364e6271f9e190ec7a2a1ff5 (patch)
tree: a1ee301bf688d51876d40197ea4b1ef23a27d4c2 /compiler
parent: fb644e6a1a7d34c6bbb5ecfe5c185f8c977d6bb3 (diff)
parent: 52824052d7782762056aec11179e08ed015c0dd3 (diff)
download: rust-6ccd4476036edfce364e6271f9e190ec7a2a1ff5.tar.gz
download: rust-6ccd4476036edfce364e6271f9e190ec7a2a1ff5.zip
Auto merge of #141700 - RalfJung:atomic-intrinsics-part2, r=bjorn3
Atomic intrinsics: use const generic ordering, part 2

This completes what got started in https://github.com/rust-lang/rust/pull/141507 by using a const generic for the ordering for all intrinsics. It is based on that PR; only the last commit is new.

Blocked on:
- https://github.com/rust-lang/rust/pull/141507
- https://github.com/rust-lang/rust/pull/141687
- https://github.com/rust-lang/stdarch/pull/1811
- https://github.com/rust-lang/rust/pull/141964

r? `@bjorn3`
Diffstat (limited to 'compiler')
-rw-r--r-- compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 28
-rw-r--r-- compiler/rustc_codegen_ssa/messages.ftl | 8
-rw-r--r-- compiler/rustc_codegen_ssa/src/errors.rs | 16
-rw-r--r-- compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 322
-rw-r--r-- compiler/rustc_error_codes/src/error_codes/E0092.md | 15
-rw-r--r-- compiler/rustc_error_codes/src/error_codes/E0093.md | 17
-rw-r--r-- compiler/rustc_error_codes/src/error_codes/E0622.md | 4
-rw-r--r-- compiler/rustc_hir_analysis/messages.ftl | 4
-rw-r--r-- compiler/rustc_hir_analysis/src/check/intrinsic.rs | 955
-rw-r--r-- compiler/rustc_hir_analysis/src/errors.rs | 9
-rw-r--r-- compiler/rustc_span/src/symbol.rs | 16
11 files changed, 637 insertions, 757 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 27a5df8b152..a0f96d85dc3 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -875,7 +875,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let ptr = ptr.load_scalar(fx);
 
             let ty = generic_args.type_at(0);
-            let _ord = generic_args.const_at(1).to_value(); // FIXME: forward this to cranelift once they support that
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                     // FIXME implement 128bit atomics
@@ -906,7 +905,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let val = CValue::by_val(val, fx.layout_of(ty));
             ret.write_cvalue(fx, val);
         }
-        _ if intrinsic.as_str().starts_with("atomic_store") => {
+        sym::atomic_store => {
             intrinsic_args!(fx, args => (ptr, val); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -939,7 +938,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+        sym::atomic_xchg => {
             intrinsic_args!(fx, args => (ptr, new); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -960,8 +959,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
-            // both atomic_cxchg_* and atomic_cxchgweak_*
+        sym::atomic_cxchg | sym::atomic_cxchgweak => {
             intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -984,7 +982,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, ret_val)
         }
 
-        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+        sym::atomic_xadd => {
             intrinsic_args!(fx, args => (ptr, amount); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1006,7 +1004,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+        sym::atomic_xsub => {
             intrinsic_args!(fx, args => (ptr, amount); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1028,7 +1026,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_and") => {
+        sym::atomic_and => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1049,7 +1047,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_or") => {
+        sym::atomic_or => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1070,7 +1068,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xor") => {
+        sym::atomic_xor => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1091,7 +1089,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_nand") => {
+        sym::atomic_nand => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1112,7 +1110,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_max") => {
+        sym::atomic_max => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1133,7 +1131,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_umax") => {
+        sym::atomic_umax => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1154,7 +1152,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_min") => {
+        sym::atomic_min => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
@@ -1175,7 +1173,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_umin") => {
+        sym::atomic_umin => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index acb4cbaa13f..91f6af7fb93 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -8,8 +8,6 @@ codegen_ssa_aix_strip_not_used = using host's `strip` binary to cross-compile to
 
 codegen_ssa_archive_build_failure = failed to build archive at `{$path}`: {$error}
 
-codegen_ssa_atomic_compare_exchange = Atomic compare-exchange intrinsic missing failure memory ordering
-
 codegen_ssa_autodiff_without_lto = using the autodiff feature requires using fat-lto
 
 codegen_ssa_bare_instruction_set = `#[instruction_set]` requires an argument
@@ -206,8 +204,6 @@ codegen_ssa_missing_cpp_build_tool_component = or a necessary component may be m
 
 codegen_ssa_missing_features = add the missing features in a `target_feature` attribute
 
-codegen_ssa_missing_memory_ordering = Atomic intrinsic missing memory ordering
-
 codegen_ssa_missing_query_depgraph =
     found CGU-reuse attribute but `-Zquery-dep-graph` was not specified
 
@@ -374,10 +370,6 @@ codegen_ssa_unexpected_parameter_name = unexpected parameter name
 codegen_ssa_unknown_archive_kind =
     Don't know how to build archive of type: {$kind}
 
-codegen_ssa_unknown_atomic_operation = unknown atomic operation
-
-codegen_ssa_unknown_atomic_ordering = unknown ordering in atomic intrinsic
-
 codegen_ssa_unknown_reuse_kind = unknown cgu-reuse-kind `{$kind}` specified
 
 codegen_ssa_unsupported_instruction_set = target does not support `#[instruction_set]`
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 572d7b1e06a..f843347db92 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -797,22 +797,6 @@ pub(crate) struct ShuffleIndicesEvaluation {
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_ssa_missing_memory_ordering)]
-pub(crate) struct MissingMemoryOrdering;
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_unknown_atomic_ordering)]
-pub(crate) struct UnknownAtomicOrdering;
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_atomic_compare_exchange)]
-pub(crate) struct AtomicCompareExchange;
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_unknown_atomic_operation)]
-pub(crate) struct UnknownAtomicOperation;
-
-#[derive(Diagnostic)]
 pub enum InvalidMonomorphization<'tcx> {
     #[diag(codegen_ssa_invalid_monomorphization_basic_integer_type, code = E0511)]
     BasicIntegerType {
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8c6f52084c2..a3f09f64a3e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -8,9 +8,10 @@ use rustc_span::sym;
 use super::FunctionCx;
 use super::operand::OperandRef;
 use super::place::PlaceRef;
+use crate::common::{AtomicRmwBinOp, SynchronizationScope};
 use crate::errors::InvalidMonomorphization;
 use crate::traits::*;
-use crate::{MemFlags, errors, meth, size_of_val};
+use crate::{MemFlags, meth, size_of_val};
 
 fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
@@ -62,7 +63,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let span = source_info.span;
 
         let name = bx.tcx().item_name(instance.def_id());
-        let name_str = name.as_str();
         let fn_args = instance.args;
 
         // If we're swapping something that's *not* an `OperandValue::Ref`,
@@ -89,14 +89,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         }
 
-        let ret_llval = |bx: &mut Bx, llval| {
-            if result.layout.ty.is_bool() {
-                let val = bx.from_immediate(llval);
-                bx.store_to_place(val, result.val);
-            } else if !result.layout.ty.is_unit() {
-                bx.store_to_place(llval, result.val);
-            }
-            Ok(())
+        let invalid_monomorphization_int_type = |ty| {
+            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
+        };
+
+        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
+            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
+            discr.to_atomic_ordering()
         };
 
         let llval = match name {
@@ -336,184 +335,145 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
 
-            // This requires that atomic intrinsics follow a specific naming pattern:
-            // "atomic_<operation>[_<ordering>]"
-            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
-                use rustc_middle::ty::AtomicOrdering::*;
-
-                use crate::common::{AtomicRmwBinOp, SynchronizationScope};
+            sym::atomic_load => {
+                let ty = fn_args.type_at(0);
+                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+                let ordering = fn_args.const_at(1).to_value();
+                let layout = bx.layout_of(ty);
+                let source = args[0].immediate();
+                bx.atomic_load(
+                    bx.backend_type(layout),
+                    source,
+                    parse_atomic_ordering(ordering),
+                    layout.size,
+                )
+            }
+            sym::atomic_store => {
+                let ty = fn_args.type_at(0);
+                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+                let ordering = fn_args.const_at(1).to_value();
+                let size = bx.layout_of(ty).size;
+                let val = args[1].immediate();
+                let ptr = args[0].immediate();
+                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
+                return Ok(());
+            }
+            sym::atomic_cxchg | sym::atomic_cxchgweak => {
+                let ty = fn_args.type_at(0);
+                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+                let succ_ordering = fn_args.const_at(1).to_value();
+                let fail_ordering = fn_args.const_at(2).to_value();
+                let weak = name == sym::atomic_cxchgweak;
+                let dst = args[0].immediate();
+                let cmp = args[1].immediate();
+                let src = args[2].immediate();
+                let (val, success) = bx.atomic_cmpxchg(
+                    dst,
+                    cmp,
+                    src,
+                    parse_atomic_ordering(succ_ordering),
+                    parse_atomic_ordering(fail_ordering),
+                    weak,
+                );
+                let val = bx.from_immediate(val);
+                let success = bx.from_immediate(success);
 
-                let invalid_monomorphization = |ty| {
-                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
-                        span,
-                        name,
-                        ty,
-                    });
-                };
+                let dest = result.project_field(bx, 0);
+                bx.store_to_place(val, dest.val);
+                let dest = result.project_field(bx, 1);
+                bx.store_to_place(success, dest.val);
 
-                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
-                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
-                    discr.to_atomic_ordering()
+                return Ok(());
+            }
+            // These are all AtomicRMW ops
+            sym::atomic_max | sym::atomic_min => {
+                let atom_op = if name == sym::atomic_max {
+                    AtomicRmwBinOp::AtomicMax
+                } else {
+                    AtomicRmwBinOp::AtomicMin
                 };
 
-                // Some intrinsics have the ordering already converted to a const generic parameter, we handle those first.
-                match name {
-                    sym::atomic_load => {
-                        let ty = fn_args.type_at(0);
-                        let ordering = fn_args.const_at(1).to_value();
-                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                        let layout = bx.layout_of(ty);
-                        let source = args[0].immediate();
-                        let llval = bx.atomic_load(
-                            bx.backend_type(layout),
-                            source,
-                            parse_const_generic_ordering(ordering),
-                            layout.size,
-                        );
-
-                        return ret_llval(bx, llval);
-                    }
-
-                    // The rest falls back to below.
-                    _ => {}
+                let ty = fn_args.type_at(0);
+                if matches!(ty.kind(), ty::Int(_)) {
+                    let ordering = fn_args.const_at(1).to_value();
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                } else {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
                 }
-
-                let Some((instruction, ordering)) = atomic.split_once('_') else {
-                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
+            }
+            sym::atomic_umax | sym::atomic_umin => {
+                let atom_op = if name == sym::atomic_umax {
+                    AtomicRmwBinOp::AtomicUMax
+                } else {
+                    AtomicRmwBinOp::AtomicUMin
                 };
 
-                let parse_ordering = |bx: &Bx, s| match s {
-                    "relaxed" => Relaxed,
-                    "acquire" => Acquire,
-                    "release" => Release,
-                    "acqrel" => AcqRel,
-                    "seqcst" => SeqCst,
-                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
+                let ty = fn_args.type_at(0);
+                if matches!(ty.kind(), ty::Uint(_)) {
+                    let ordering = fn_args.const_at(1).to_value();
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                } else {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+            }
+            sym::atomic_xchg
+            | sym::atomic_xadd
+            | sym::atomic_xsub
+            | sym::atomic_and
+            | sym::atomic_nand
+            | sym::atomic_or
+            | sym::atomic_xor => {
+                let atom_op = match name {
+                    sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
+                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
+                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
+                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
+                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
+                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
+                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
+                    _ => unreachable!(),
                 };
 
-                match instruction {
-                    "cxchg" | "cxchgweak" => {
-                        let Some((success, failure)) = ordering.split_once('_') else {
-                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
-                        };
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let weak = instruction == "cxchgweak";
-                            let dst = args[0].immediate();
-                            let cmp = args[1].immediate();
-                            let src = args[2].immediate();
-                            let (val, success) = bx.atomic_cmpxchg(
-                                dst,
-                                cmp,
-                                src,
-                                parse_ordering(bx, success),
-                                parse_ordering(bx, failure),
-                                weak,
-                            );
-                            let val = bx.from_immediate(val);
-                            let success = bx.from_immediate(success);
-
-                            let dest = result.project_field(bx, 0);
-                            bx.store_to_place(val, dest.val);
-                            let dest = result.project_field(bx, 1);
-                            bx.store_to_place(success, dest.val);
-                        } else {
-                            invalid_monomorphization(ty);
-                        }
-                        return Ok(());
-                    }
-
-                    "store" => {
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let size = bx.layout_of(ty).size;
-                            let val = args[1].immediate();
-                            let ptr = args[0].immediate();
-                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
-                        } else {
-                            invalid_monomorphization(ty);
-                        }
-                        return Ok(());
-                    }
-
-                    "fence" => {
-                        bx.atomic_fence(
-                            parse_ordering(bx, ordering),
-                            SynchronizationScope::CrossThread,
-                        );
-                        return Ok(());
-                    }
-
-                    "singlethreadfence" => {
-                        bx.atomic_fence(
-                            parse_ordering(bx, ordering),
-                            SynchronizationScope::SingleThread,
-                        );
-                        return Ok(());
-                    }
-
-                    // These are all AtomicRMW ops
-                    "max" | "min" => {
-                        let atom_op = if instruction == "max" {
-                            AtomicRmwBinOp::AtomicMax
-                        } else {
-                            AtomicRmwBinOp::AtomicMin
-                        };
-
-                        let ty = fn_args.type_at(0);
-                        if matches!(ty.kind(), ty::Int(_)) {
-                            let ptr = args[0].immediate();
-                            let val = args[1].immediate();
-                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-                    "umax" | "umin" => {
-                        let atom_op = if instruction == "umax" {
-                            AtomicRmwBinOp::AtomicUMax
-                        } else {
-                            AtomicRmwBinOp::AtomicUMin
-                        };
-
-                        let ty = fn_args.type_at(0);
-                        if matches!(ty.kind(), ty::Uint(_)) {
-                            let ptr = args[0].immediate();
-                            let val = args[1].immediate();
-                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-                    op => {
-                        let atom_op = match op {
-                            "xchg" => AtomicRmwBinOp::AtomicXchg,
-                            "xadd" => AtomicRmwBinOp::AtomicAdd,
-                            "xsub" => AtomicRmwBinOp::AtomicSub,
-                            "and" => AtomicRmwBinOp::AtomicAnd,
-                            "nand" => AtomicRmwBinOp::AtomicNand,
-                            "or" => AtomicRmwBinOp::AtomicOr,
-                            "xor" => AtomicRmwBinOp::AtomicXor,
-                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
-                        };
-
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let ptr = args[0].immediate();
-                            let val = args[1].immediate();
-                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
+                let ty = fn_args.type_at(0);
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
+                    let ordering = fn_args.const_at(1).to_value();
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                } else {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
                 }
             }
+            sym::atomic_fence => {
+                let ordering = fn_args.const_at(0).to_value();
+                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
+                return Ok(());
+            }
+
+            sym::atomic_singlethreadfence => {
+                let ordering = fn_args.const_at(0).to_value();
+                bx.atomic_fence(
+                    parse_atomic_ordering(ordering),
+                    SynchronizationScope::SingleThread,
+                );
+                return Ok(());
+            }
 
             sym::nontemporal_store => {
                 let dst = args[0].deref(bx.cx());
@@ -556,7 +516,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };
 
-        ret_llval(bx, llval)
+        if result.layout.ty.is_bool() {
+            let val = bx.from_immediate(llval);
+            bx.store_to_place(val, result.val);
+        } else if !result.layout.ty.is_unit() {
+            bx.store_to_place(llval, result.val);
+        }
+        Ok(())
     }
 }
 
diff --git a/compiler/rustc_error_codes/src/error_codes/E0092.md b/compiler/rustc_error_codes/src/error_codes/E0092.md
index be459d040c2..9c63798ded7 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0092.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0092.md
@@ -1,8 +1,10 @@
+#### Note: this error code is no longer emitted by the compiler.
+
 An undefined atomic operation function was declared.
 
 Erroneous code example:
 
-```compile_fail,E0092
+```ignore (no longer emitted)
 #![feature(intrinsics)]
 #![allow(internal_features)]
 
@@ -12,13 +14,4 @@ unsafe fn atomic_foo(); // error: unrecognized atomic operation
 ```
 
 Please check you didn't make a mistake in the function's name. All intrinsic
-functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
-`library/core/src/intrinsics.rs` in the Rust source code. Example:
-
-```
-#![feature(intrinsics)]
-#![allow(internal_features)]
-
-#[rustc_intrinsic]
-unsafe fn atomic_fence_seqcst(); // ok!
-```
+functions are defined in `library/core/src/intrinsics` in the Rust source code.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0093.md b/compiler/rustc_error_codes/src/error_codes/E0093.md
index 9929a069927..3552c2db4cc 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0093.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0093.md
@@ -17,19 +17,4 @@ fn main() {
 ```
 
 Please check you didn't make a mistake in the function's name. All intrinsic
-functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
-`library/core/src/intrinsics.rs` in the Rust source code. Example:
-
-```
-#![feature(intrinsics)]
-#![allow(internal_features)]
-
-#[rustc_intrinsic]
-unsafe fn atomic_fence_seqcst(); // ok!
-
-fn main() {
-    unsafe {
-        atomic_fence_seqcst();
-    }
-}
-```
+functions are defined in `library/core/src/intrinsics` in the Rust source code.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0622.md b/compiler/rustc_error_codes/src/error_codes/E0622.md
index 9b8131a061e..cc66e067990 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0622.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0622.md
@@ -4,7 +4,7 @@ An intrinsic was declared without being a function.
 
 Erroneous code example:
 
-```no_run
+```ignore (no longer emitted)
 #![feature(intrinsics)]
 #![allow(internal_features)]
 
@@ -21,7 +21,7 @@ An intrinsic is a function available for use in a given programming language
 whose implementation is handled specially by the compiler. In order to fix this
 error, just declare a function. Example:
 
-```no_run
+```ignore (no longer emitted)
 #![feature(intrinsics)]
 #![allow(internal_features)]
 
diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl
index a3a0e276f74..4fcd9f8a646 100644
--- a/compiler/rustc_hir_analysis/messages.ftl
+++ b/compiler/rustc_hir_analysis/messages.ftl
@@ -565,10 +565,6 @@ hir_analysis_unconstrained_generic_parameter = the {$param_def_kind} `{$param_na
 hir_analysis_unconstrained_opaque_type = unconstrained opaque type
     .note = `{$name}` must be used in combination with a concrete type within the same {$what}
 
-hir_analysis_unrecognized_atomic_operation =
-    unrecognized atomic operation function: `{$op}`
-    .label = unrecognized atomic operation
-
 hir_analysis_unrecognized_intrinsic_function =
     unrecognized intrinsic function: `{$name}`
     .label = unrecognized intrinsic
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 234520c1583..481cdaa4c6c 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -9,10 +9,7 @@ use rustc_span::def_id::LocalDefId;
 use rustc_span::{Span, Symbol, sym};
 
 use crate::check::check_function_signature;
-use crate::errors::{
-    UnrecognizedAtomicOperation, UnrecognizedIntrinsicFunction,
-    WrongNumberOfGenericArgumentsToIntrinsic,
-};
+use crate::errors::{UnrecognizedIntrinsicFunction, WrongNumberOfGenericArgumentsToIntrinsic};
 
 fn equate_intrinsic_type<'tcx>(
     tcx: TyCtxt<'tcx>,
@@ -172,7 +169,6 @@ pub(crate) fn check_intrinsic_type(
             Ty::new_error_with_message(tcx, span, "expected param")
         }
     };
-    let name_str = intrinsic_name.as_str();
 
     let bound_vars = tcx.mk_bound_variable_kinds(&[
         ty::BoundVariableKind::Region(ty::BoundRegionKind::Anon),
@@ -198,508 +194,471 @@ pub(crate) fn check_intrinsic_type(
         (Ty::new_ref(tcx, env_region, va_list_ty, mutbl), va_list_ty)
     };
 
-    let (n_tps, n_lts, n_cts, inputs, output, safety) = if name_str.starts_with("atomic_") {
-        let split: Vec<&str> = name_str.split('_').collect();
-        assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");
+    let safety = intrinsic_operation_unsafety(tcx, intrinsic_id);
+    let n_lts = 0;
+    let (n_tps, n_cts, inputs, output) = match intrinsic_name {
+        sym::abort => (0, 0, vec![], tcx.types.never),
+        sym::unreachable => (0, 0, vec![], tcx.types.never),
+        sym::breakpoint => (0, 0, vec![], tcx.types.unit),
+        sym::size_of | sym::pref_align_of | sym::min_align_of | sym::variant_count => {
+            (1, 0, vec![], tcx.types.usize)
+        }
+        sym::size_of_val | sym::min_align_of_val => {
+            (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], tcx.types.usize)
+        }
+        sym::rustc_peek => (1, 0, vec![param(0)], param(0)),
+        sym::caller_location => (0, 0, vec![], tcx.caller_location_ty()),
+        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
+            (1, 0, vec![], tcx.types.unit)
+        }
+        sym::forget => (1, 0, vec![param(0)], tcx.types.unit),
+        sym::transmute | sym::transmute_unchecked => (2, 0, vec![param(0)], param(1)),
+        sym::prefetch_read_data
+        | sym::prefetch_write_data
+        | sym::prefetch_read_instruction
+        | sym::prefetch_write_instruction => {
+            (1, 0, vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.i32], tcx.types.unit)
+        }
+        sym::needs_drop => (1, 0, vec![], tcx.types.bool),
+
+        sym::type_name => (1, 0, vec![], Ty::new_static_str(tcx)),
+        sym::type_id => (1, 0, vec![], tcx.types.u128),
+        sym::offset => (2, 0, vec![param(0), param(1)], param(0)),
+        sym::arith_offset => (
+            1,
+            0,
+            vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.isize],
+            Ty::new_imm_ptr(tcx, param(0)),
+        ),
+        sym::slice_get_unchecked => (3, 0, vec![param(1), tcx.types.usize], param(0)),
+        sym::ptr_mask => (
+            1,
+            0,
+            vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.usize],
+            Ty::new_imm_ptr(tcx, param(0)),
+        ),
+
+        sym::copy | sym::copy_nonoverlapping => (
+            1,
+            0,
+            vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_mut_ptr(tcx, param(0)), tcx.types.usize],
+            tcx.types.unit,
+        ),
+        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => (
+            1,
+            0,
+            vec![Ty::new_mut_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0)), tcx.types.usize],
+            tcx.types.unit,
+        ),
+        sym::compare_bytes => {
+            let byte_ptr = Ty::new_imm_ptr(tcx, tcx.types.u8);
+            (0, 0, vec![byte_ptr, byte_ptr, tcx.types.usize], tcx.types.i32)
+        }
+        sym::write_bytes | sym::volatile_set_memory => (
+            1,
+            0,
+            vec![Ty::new_mut_ptr(tcx, param(0)), tcx.types.u8, tcx.types.usize],
+            tcx.types.unit,
+        ),
+
+        sym::sqrtf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::sqrtf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::sqrtf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::sqrtf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::powif16 => (0, 0, vec![tcx.types.f16, tcx.types.i32], tcx.types.f16),
+        sym::powif32 => (0, 0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32),
+        sym::powif64 => (0, 0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64),
+        sym::powif128 => (0, 0, vec![tcx.types.f128, tcx.types.i32], tcx.types.f128),
+
+        sym::sinf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::sinf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::sinf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::sinf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::cosf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::cosf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::cosf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::cosf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::powf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::powf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::powf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::powf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
+
+        sym::expf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::expf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::expf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::expf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::exp2f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::exp2f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::exp2f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::exp2f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::logf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::logf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::logf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::logf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::log10f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::log10f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::log10f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::log10f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::log2f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::log2f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::log2f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::log2f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::fmaf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::fmaf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::fmaf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::fmaf128 => {
+            (0, 0, vec![tcx.types.f128, tcx.types.f128, tcx.types.f128], tcx.types.f128)
+        }
 
-        // Each atomic op has variants with different suffixes (`_seq_cst`, `_acquire`, etc.). Use
-        // string ops to strip the suffixes, because the variants all get the same treatment here.
-        let (n_tps, n_cts, inputs, output) = match split[1] {
-            "cxchg" | "cxchgweak" => (
-                1,
-                0,
-                vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
-                Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
-            ),
-            "load" => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
-            "store" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
-
-            "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
-            | "umin" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
-            "fence" | "singlethreadfence" => (0, 0, Vec::new(), tcx.types.unit),
-            op => {
-                tcx.dcx().emit_err(UnrecognizedAtomicOperation { span, op });
-                return;
-            }
-        };
-        (n_tps, 0, n_cts, inputs, output, hir::Safety::Unsafe)
-    } else if intrinsic_name == sym::contract_check_ensures {
-        // contract_check_ensures::<Ret, C>(Ret, C) -> Ret
-        // where C: for<'a> Fn(&'a Ret) -> bool,
-        //
-        // so: two type params, 0 lifetime param, 0 const params, two inputs, no return
-        (2, 0, 0, vec![param(0), param(1)], param(1), hir::Safety::Safe)
-    } else {
-        let safety = intrinsic_operation_unsafety(tcx, intrinsic_id);
-        let (n_tps, n_cts, inputs, output) = match intrinsic_name {
-            sym::abort => (0, 0, vec![], tcx.types.never),
-            sym::unreachable => (0, 0, vec![], tcx.types.never),
-            sym::breakpoint => (0, 0, vec![], tcx.types.unit),
-            sym::size_of | sym::pref_align_of | sym::min_align_of | sym::variant_count => {
-                (1, 0, vec![], tcx.types.usize)
-            }
-            sym::size_of_val | sym::min_align_of_val => {
-                (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], tcx.types.usize)
-            }
-            sym::rustc_peek => (1, 0, vec![param(0)], param(0)),
-            sym::caller_location => (0, 0, vec![], tcx.caller_location_ty()),
-            sym::assert_inhabited
-            | sym::assert_zero_valid
-            | sym::assert_mem_uninitialized_valid => (1, 0, vec![], tcx.types.unit),
-            sym::forget => (1, 0, vec![param(0)], tcx.types.unit),
-            sym::transmute | sym::transmute_unchecked => (2, 0, vec![param(0)], param(1)),
-            sym::prefetch_read_data
-            | sym::prefetch_write_data
-            | sym::prefetch_read_instruction
-            | sym::prefetch_write_instruction => {
-                (1, 0, vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.i32], tcx.types.unit)
-            }
-            sym::needs_drop => (1, 0, vec![], tcx.types.bool),
-
-            sym::type_name => (1, 0, vec![], Ty::new_static_str(tcx)),
-            sym::type_id => (1, 0, vec![], tcx.types.u128),
-            sym::offset => (2, 0, vec![param(0), param(1)], param(0)),
-            sym::arith_offset => (
-                1,
-                0,
-                vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.isize],
-                Ty::new_imm_ptr(tcx, param(0)),
-            ),
-            sym::slice_get_unchecked => (3, 0, vec![param(1), tcx.types.usize], param(0)),
-            sym::ptr_mask => (
-                1,
-                0,
-                vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.usize],
-                Ty::new_imm_ptr(tcx, param(0)),
-            ),
+        sym::fmuladdf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::fmuladdf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::fmuladdf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::fmuladdf128 => {
+            (0, 0, vec![tcx.types.f128, tcx.types.f128, tcx.types.f128], tcx.types.f128)
+        }
 
-            sym::copy | sym::copy_nonoverlapping => (
-                1,
-                0,
-                vec![
-                    Ty::new_imm_ptr(tcx, param(0)),
-                    Ty::new_mut_ptr(tcx, param(0)),
-                    tcx.types.usize,
-                ],
-                tcx.types.unit,
-            ),
-            sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => (
+        sym::fabsf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::fabsf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::fabsf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::fabsf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::minnumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::minnumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::minnumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::minnumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
+
+        sym::minimumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::minimumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::minimumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::minimumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
+
+        sym::maxnumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::maxnumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::maxnumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::maxnumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
+
+        sym::maximumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::maximumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::maximumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::maximumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
+
+        sym::copysignf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
+        sym::copysignf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+        sym::copysignf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+        sym::copysignf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
+
+        sym::floorf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::floorf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::floorf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::floorf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::ceilf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::ceilf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::ceilf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::ceilf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::truncf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::truncf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::truncf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::truncf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::round_ties_even_f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::round_ties_even_f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::round_ties_even_f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::round_ties_even_f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::roundf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
+        sym::roundf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
+        sym::roundf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
+        sym::roundf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
+
+        sym::volatile_load | sym::unaligned_volatile_load => {
+            (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0))
+        }
+        sym::volatile_store | sym::unaligned_volatile_store => {
+            (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
+        }
+
+        sym::ctpop | sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
+            (1, 0, vec![param(0)], tcx.types.u32)
+        }
+
+        sym::bswap | sym::bitreverse => (1, 0, vec![param(0)], param(0)),
+
+        sym::three_way_compare => (1, 0, vec![param(0), param(0)], tcx.ty_ordering_enum(span)),
+
+        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+            (1, 0, vec![param(0), param(0)], Ty::new_tup(tcx, &[param(0), tcx.types.bool]))
+        }
+
+        sym::carrying_mul_add => (2, 0, vec![param(0); 4], Ty::new_tup(tcx, &[param(1), param(0)])),
+
+        sym::ptr_guaranteed_cmp => (
+            1,
+            0,
+            vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
+            tcx.types.u8,
+        ),
+
+        sym::const_allocate => {
+            (0, 0, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
+        }
+        sym::const_deallocate => (
+            0,
+            0,
+            vec![Ty::new_mut_ptr(tcx, tcx.types.u8), tcx.types.usize, tcx.types.usize],
+            tcx.types.unit,
+        ),
+
+        sym::ptr_offset_from => (
+            1,
+            0,
+            vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
+            tcx.types.isize,
+        ),
+        sym::ptr_offset_from_unsigned => (
+            1,
+            0,
+            vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
+            tcx.types.usize,
+        ),
+        sym::unchecked_div | sym::unchecked_rem | sym::exact_div | sym::disjoint_bitor => {
+            (1, 0, vec![param(0), param(0)], param(0))
+        }
+        sym::unchecked_shl | sym::unchecked_shr => (2, 0, vec![param(0), param(1)], param(0)),
+        sym::rotate_left | sym::rotate_right => (1, 0, vec![param(0), tcx.types.u32], param(0)),
+        sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => {
+            (1, 0, vec![param(0), param(0)], param(0))
+        }
+        sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
+            (1, 0, vec![param(0), param(0)], param(0))
+        }
+        sym::saturating_add | sym::saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
+        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+            (1, 0, vec![param(0), param(0)], param(0))
+        }
+        sym::fadd_algebraic
+        | sym::fsub_algebraic
+        | sym::fmul_algebraic
+        | sym::fdiv_algebraic
+        | sym::frem_algebraic => (1, 0, vec![param(0), param(0)], param(0)),
+        sym::float_to_int_unchecked => (2, 0, vec![param(0)], param(1)),
+
+        sym::assume => (0, 0, vec![tcx.types.bool], tcx.types.unit),
+        sym::select_unpredictable => (1, 0, vec![tcx.types.bool, param(0), param(0)], param(0)),
+        sym::cold_path => (0, 0, vec![], tcx.types.unit),
+
+        sym::read_via_copy => (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
+        sym::write_via_move => {
+            (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
+        }
+
+        sym::typed_swap_nonoverlapping => {
+            (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)); 2], tcx.types.unit)
+        }
+
+        sym::discriminant_value => {
+            let assoc_items = tcx.associated_item_def_ids(
+                tcx.require_lang_item(hir::LangItem::DiscriminantKind, span),
+            );
+            let discriminant_def_id = assoc_items[0];
+
+            let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::Anon };
+            (
                 1,
                 0,
-                vec![
-                    Ty::new_mut_ptr(tcx, param(0)),
-                    Ty::new_imm_ptr(tcx, param(0)),
-                    tcx.types.usize,
-                ],
+                vec![Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0))],
+                Ty::new_projection_from_args(
+                    tcx,
+                    discriminant_def_id,
+                    tcx.mk_args(&[param(0).into()]),
+                ),
+            )
+        }
+
+        sym::catch_unwind => {
+            let mut_u8 = Ty::new_mut_ptr(tcx, tcx.types.u8);
+            let try_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
+                [mut_u8],
                 tcx.types.unit,
-            ),
-            sym::compare_bytes => {
-                let byte_ptr = Ty::new_imm_ptr(tcx, tcx.types.u8);
-                (0, 0, vec![byte_ptr, byte_ptr, tcx.types.usize], tcx.types.i32)
-            }
-            sym::write_bytes | sym::volatile_set_memory => (
-                1,
-                0,
-                vec![Ty::new_mut_ptr(tcx, param(0)), tcx.types.u8, tcx.types.usize],
+                false,
+                hir::Safety::Safe,
+                ExternAbi::Rust,
+            ));
+            let catch_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
+                [mut_u8, mut_u8],
                 tcx.types.unit,
-            ),
-
-            sym::sqrtf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::sqrtf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::sqrtf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::sqrtf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::powif16 => (0, 0, vec![tcx.types.f16, tcx.types.i32], tcx.types.f16),
-            sym::powif32 => (0, 0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32),
-            sym::powif64 => (0, 0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64),
-            sym::powif128 => (0, 0, vec![tcx.types.f128, tcx.types.i32], tcx.types.f128),
-
-            sym::sinf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::sinf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::sinf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::sinf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::cosf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::cosf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::cosf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::cosf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::powf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::powf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::powf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::powf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
-
-            sym::expf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::expf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::expf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::expf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::exp2f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::exp2f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::exp2f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::exp2f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::logf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::logf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::logf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::logf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::log10f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::log10f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::log10f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::log10f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::log2f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::log2f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::log2f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::log2f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::fmaf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::fmaf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::fmaf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::fmaf128 => {
-                (0, 0, vec![tcx.types.f128, tcx.types.f128, tcx.types.f128], tcx.types.f128)
-            }
-
-            sym::fmuladdf16 => {
-                (0, 0, vec![tcx.types.f16, tcx.types.f16, tcx.types.f16], tcx.types.f16)
-            }
-            sym::fmuladdf32 => {
-                (0, 0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32)
-            }
-            sym::fmuladdf64 => {
-                (0, 0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64)
-            }
-            sym::fmuladdf128 => {
-                (0, 0, vec![tcx.types.f128, tcx.types.f128, tcx.types.f128], tcx.types.f128)
-            }
-
-            sym::fabsf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::fabsf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::fabsf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::fabsf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::minnumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::minnumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::minnumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::minnumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
-
-            sym::minimumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::minimumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::minimumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::minimumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
-
-            sym::maxnumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::maxnumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::maxnumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::maxnumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
-
-            sym::maximumf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::maximumf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::maximumf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::maximumf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
-
-            sym::copysignf16 => (0, 0, vec![tcx.types.f16, tcx.types.f16], tcx.types.f16),
-            sym::copysignf32 => (0, 0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
-            sym::copysignf64 => (0, 0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
-            sym::copysignf128 => (0, 0, vec![tcx.types.f128, tcx.types.f128], tcx.types.f128),
-
-            sym::floorf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::floorf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::floorf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::floorf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::ceilf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::ceilf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::ceilf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::ceilf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::truncf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::truncf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::truncf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::truncf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::round_ties_even_f16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::round_ties_even_f32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::round_ties_even_f64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::round_ties_even_f128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::roundf16 => (0, 0, vec![tcx.types.f16], tcx.types.f16),
-            sym::roundf32 => (0, 0, vec![tcx.types.f32], tcx.types.f32),
-            sym::roundf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
-            sym::roundf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
-
-            sym::volatile_load | sym::unaligned_volatile_load => {
-                (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0))
-            }
-            sym::volatile_store | sym::unaligned_volatile_store => {
-                (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
-            }
-
-            sym::ctpop | sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
-                (1, 0, vec![param(0)], tcx.types.u32)
-            }
-
-            sym::bswap | sym::bitreverse => (1, 0, vec![param(0)], param(0)),
-
-            sym::three_way_compare => (1, 0, vec![param(0), param(0)], tcx.ty_ordering_enum(span)),
-
-            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-                (1, 0, vec![param(0), param(0)], Ty::new_tup(tcx, &[param(0), tcx.types.bool]))
-            }
-
-            sym::carrying_mul_add => {
-                (2, 0, vec![param(0); 4], Ty::new_tup(tcx, &[param(1), param(0)]))
-            }
-
-            sym::ptr_guaranteed_cmp => (
-                1,
-                0,
-                vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
-                tcx.types.u8,
-            ),
-
-            sym::const_allocate => {
-                (0, 0, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
-            }
-            sym::const_deallocate => (
+                false,
+                hir::Safety::Safe,
+                ExternAbi::Rust,
+            ));
+            (
                 0,
                 0,
-                vec![Ty::new_mut_ptr(tcx, tcx.types.u8), tcx.types.usize, tcx.types.usize],
-                tcx.types.unit,
-            ),
+                vec![Ty::new_fn_ptr(tcx, try_fn_ty), mut_u8, Ty::new_fn_ptr(tcx, catch_fn_ty)],
+                tcx.types.i32,
+            )
+        }
 
-            sym::ptr_offset_from => (
-                1,
-                0,
-                vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
-                tcx.types.isize,
-            ),
-            sym::ptr_offset_from_unsigned => (
-                1,
-                0,
-                vec![Ty::new_imm_ptr(tcx, param(0)), Ty::new_imm_ptr(tcx, param(0))],
-                tcx.types.usize,
-            ),
-            sym::unchecked_div | sym::unchecked_rem | sym::exact_div | sym::disjoint_bitor => {
-                (1, 0, vec![param(0), param(0)], param(0))
-            }
-            sym::unchecked_shl | sym::unchecked_shr => (2, 0, vec![param(0), param(1)], param(0)),
-            sym::rotate_left | sym::rotate_right => (1, 0, vec![param(0), tcx.types.u32], param(0)),
-            sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => {
-                (1, 0, vec![param(0), param(0)], param(0))
-            }
-            sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
-                (1, 0, vec![param(0), param(0)], param(0))
-            }
-            sym::saturating_add | sym::saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
-            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
-                (1, 0, vec![param(0), param(0)], param(0))
-            }
-            sym::fadd_algebraic
-            | sym::fsub_algebraic
-            | sym::fmul_algebraic
-            | sym::fdiv_algebraic
-            | sym::frem_algebraic => (1, 0, vec![param(0), param(0)], param(0)),
-            sym::float_to_int_unchecked => (2, 0, vec![param(0)], param(1)),
-
-            sym::assume => (0, 0, vec![tcx.types.bool], tcx.types.unit),
-            sym::select_unpredictable => (1, 0, vec![tcx.types.bool, param(0), param(0)], param(0)),
-            sym::cold_path => (0, 0, vec![], tcx.types.unit),
-
-            sym::read_via_copy => (1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
-            sym::write_via_move => {
-                (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
-            }
-
-            sym::typed_swap_nonoverlapping => {
-                (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)); 2], tcx.types.unit)
-            }
-
-            sym::discriminant_value => {
-                let assoc_items = tcx.associated_item_def_ids(
-                    tcx.require_lang_item(hir::LangItem::DiscriminantKind, span),
-                );
-                let discriminant_def_id = assoc_items[0];
-
-                let br =
-                    ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::Anon };
-                (
-                    1,
-                    0,
-                    vec![Ty::new_imm_ref(
-                        tcx,
-                        ty::Region::new_bound(tcx, ty::INNERMOST, br),
-                        param(0),
-                    )],
-                    Ty::new_projection_from_args(
-                        tcx,
-                        discriminant_def_id,
-                        tcx.mk_args(&[param(0).into()]),
-                    ),
-                )
-            }
-
-            sym::catch_unwind => {
-                let mut_u8 = Ty::new_mut_ptr(tcx, tcx.types.u8);
-                let try_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
-                    [mut_u8],
-                    tcx.types.unit,
-                    false,
-                    hir::Safety::Safe,
-                    ExternAbi::Rust,
-                ));
-                let catch_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
-                    [mut_u8, mut_u8],
-                    tcx.types.unit,
-                    false,
-                    hir::Safety::Safe,
-                    ExternAbi::Rust,
-                ));
-                (
-                    0,
-                    0,
-                    vec![Ty::new_fn_ptr(tcx, try_fn_ty), mut_u8, Ty::new_fn_ptr(tcx, catch_fn_ty)],
-                    tcx.types.i32,
-                )
-            }
-
-            sym::va_start | sym::va_end => {
-                (0, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], tcx.types.unit)
-            }
-
-            sym::va_copy => {
-                let (va_list_ref_ty, va_list_ty) = mk_va_list_ty(hir::Mutability::Not);
-                let va_list_ptr_ty = Ty::new_mut_ptr(tcx, va_list_ty);
-                (0, 0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.types.unit)
-            }
-
-            sym::va_arg => (1, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], param(0)),
-
-            sym::nontemporal_store => {
-                (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
-            }
-
-            sym::raw_eq => {
-                let br =
-                    ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::Anon };
-                let param_ty_lhs =
-                    Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
-                let br = ty::BoundRegion {
-                    var: ty::BoundVar::from_u32(1),
-                    kind: ty::BoundRegionKind::Anon,
-                };
-                let param_ty_rhs =
-                    Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
-                (1, 0, vec![param_ty_lhs, param_ty_rhs], tcx.types.bool)
-            }
-
-            sym::black_box => (1, 0, vec![param(0)], param(0)),
-
-            sym::is_val_statically_known => (1, 0, vec![param(0)], tcx.types.bool),
-
-            sym::const_eval_select => (4, 0, vec![param(0), param(1), param(2)], param(3)),
-
-            sym::vtable_size | sym::vtable_align => {
-                (0, 0, vec![Ty::new_imm_ptr(tcx, tcx.types.unit)], tcx.types.usize)
-            }
-
-            // This type check is not particularly useful, but the `where` bounds
-            // on the definition in `core` do the heavy lifting for checking it.
-            sym::aggregate_raw_ptr => (3, 0, vec![param(1), param(2)], param(0)),
-            sym::ptr_metadata => (2, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(1)),
-
-            sym::ub_checks => (0, 0, Vec::new(), tcx.types.bool),
-
-            sym::box_new => (1, 0, vec![param(0)], Ty::new_box(tcx, param(0))),
-
-            // contract_checks() -> bool
-            sym::contract_checks => (0, 0, Vec::new(), tcx.types.bool),
-            // contract_check_requires::<C>(C) -> bool, where C: impl Fn() -> bool
-            sym::contract_check_requires => (1, 0, vec![param(0)], tcx.types.unit),
-
-            sym::simd_eq
-            | sym::simd_ne
-            | sym::simd_lt
-            | sym::simd_le
-            | sym::simd_gt
-            | sym::simd_ge => (2, 0, vec![param(0), param(0)], param(1)),
-            sym::simd_add
-            | sym::simd_sub
-            | sym::simd_mul
-            | sym::simd_rem
-            | sym::simd_div
-            | sym::simd_shl
-            | sym::simd_shr
-            | sym::simd_and
-            | sym::simd_or
-            | sym::simd_xor
-            | sym::simd_fmin
-            | sym::simd_fmax
-            | sym::simd_saturating_add
-            | sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
-            sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)),
-            sym::simd_neg
-            | sym::simd_bswap
-            | sym::simd_bitreverse
-            | sym::simd_ctlz
-            | sym::simd_cttz
-            | sym::simd_ctpop
-            | sym::simd_fsqrt
-            | sym::simd_fsin
-            | sym::simd_fcos
-            | sym::simd_fexp
-            | sym::simd_fexp2
-            | sym::simd_flog2
-            | sym::simd_flog10
-            | sym::simd_flog
-            | sym::simd_fabs
-            | sym::simd_ceil
-            | sym::simd_floor
-            | sym::simd_round
-            | sym::simd_trunc => (1, 0, vec![param(0)], param(0)),
-            sym::simd_fma | sym::simd_relaxed_fma => {
-                (1, 0, vec![param(0), param(0), param(0)], param(0))
-            }
-            sym::simd_gather => (3, 0, vec![param(0), param(1), param(2)], param(0)),
-            sym::simd_masked_load => (3, 0, vec![param(0), param(1), param(2)], param(2)),
-            sym::simd_masked_store => (3, 0, vec![param(0), param(1), param(2)], tcx.types.unit),
-            sym::simd_scatter => (3, 0, vec![param(0), param(1), param(2)], tcx.types.unit),
-            sym::simd_insert | sym::simd_insert_dyn => {
-                (2, 0, vec![param(0), tcx.types.u32, param(1)], param(0))
-            }
-            sym::simd_extract | sym::simd_extract_dyn => {
-                (2, 0, vec![param(0), tcx.types.u32], param(1))
-            }
-            sym::simd_cast
-            | sym::simd_as
-            | sym::simd_cast_ptr
-            | sym::simd_expose_provenance
-            | sym::simd_with_exposed_provenance => (2, 0, vec![param(0)], param(1)),
-            sym::simd_bitmask => (2, 0, vec![param(0)], param(1)),
-            sym::simd_select | sym::simd_select_bitmask => {
-                (2, 0, vec![param(0), param(1), param(1)], param(1))
-            }
-            sym::simd_reduce_all | sym::simd_reduce_any => (1, 0, vec![param(0)], tcx.types.bool),
-            sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => {
-                (2, 0, vec![param(0), param(1)], param(1))
-            }
-            sym::simd_reduce_add_unordered
-            | sym::simd_reduce_mul_unordered
-            | sym::simd_reduce_and
-            | sym::simd_reduce_or
-            | sym::simd_reduce_xor
-            | sym::simd_reduce_min
-            | sym::simd_reduce_max => (2, 0, vec![param(0)], param(1)),
-            sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
-            sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)),
-
-            other => {
-                tcx.dcx().emit_err(UnrecognizedIntrinsicFunction { span, name: other });
-                return;
-            }
-        };
-        (n_tps, 0, n_cts, inputs, output, safety)
+        sym::va_start | sym::va_end => {
+            (0, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], tcx.types.unit)
+        }
+
+        sym::va_copy => {
+            let (va_list_ref_ty, va_list_ty) = mk_va_list_ty(hir::Mutability::Not);
+            let va_list_ptr_ty = Ty::new_mut_ptr(tcx, va_list_ty);
+            (0, 0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.types.unit)
+        }
+
+        sym::va_arg => (1, 0, vec![mk_va_list_ty(hir::Mutability::Mut).0], param(0)),
+
+        sym::nontemporal_store => {
+            (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
+        }
+
+        sym::raw_eq => {
+            let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BoundRegionKind::Anon };
+            let param_ty_lhs =
+                Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
+            let br =
+                ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BoundRegionKind::Anon };
+            let param_ty_rhs =
+                Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
+            (1, 0, vec![param_ty_lhs, param_ty_rhs], tcx.types.bool)
+        }
+
+        sym::black_box => (1, 0, vec![param(0)], param(0)),
+
+        sym::is_val_statically_known => (1, 0, vec![param(0)], tcx.types.bool),
+
+        sym::const_eval_select => (4, 0, vec![param(0), param(1), param(2)], param(3)),
+
+        sym::vtable_size | sym::vtable_align => {
+            (0, 0, vec![Ty::new_imm_ptr(tcx, tcx.types.unit)], tcx.types.usize)
+        }
+
+        // This type check is not particularly useful, but the `where` bounds
+        // on the definition in `core` do the heavy lifting for checking it.
+        sym::aggregate_raw_ptr => (3, 0, vec![param(1), param(2)], param(0)),
+        sym::ptr_metadata => (2, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(1)),
+
+        sym::ub_checks => (0, 0, Vec::new(), tcx.types.bool),
+
+        sym::box_new => (1, 0, vec![param(0)], Ty::new_box(tcx, param(0))),
+
+        // contract_checks() -> bool
+        sym::contract_checks => (0, 0, Vec::new(), tcx.types.bool),
+        // contract_check_requires::<C>(C) -> bool, where C: impl Fn() -> bool
+        sym::contract_check_requires => (1, 0, vec![param(0)], tcx.types.unit),
+        sym::contract_check_ensures => (2, 0, vec![param(0), param(1)], param(1)),
+
+        sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+            (2, 0, vec![param(0), param(0)], param(1))
+        }
+        sym::simd_add
+        | sym::simd_sub
+        | sym::simd_mul
+        | sym::simd_rem
+        | sym::simd_div
+        | sym::simd_shl
+        | sym::simd_shr
+        | sym::simd_and
+        | sym::simd_or
+        | sym::simd_xor
+        | sym::simd_fmin
+        | sym::simd_fmax
+        | sym::simd_saturating_add
+        | sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
+        sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)),
+        sym::simd_neg
+        | sym::simd_bswap
+        | sym::simd_bitreverse
+        | sym::simd_ctlz
+        | sym::simd_cttz
+        | sym::simd_ctpop
+        | sym::simd_fsqrt
+        | sym::simd_fsin
+        | sym::simd_fcos
+        | sym::simd_fexp
+        | sym::simd_fexp2
+        | sym::simd_flog2
+        | sym::simd_flog10
+        | sym::simd_flog
+        | sym::simd_fabs
+        | sym::simd_ceil
+        | sym::simd_floor
+        | sym::simd_round
+        | sym::simd_trunc => (1, 0, vec![param(0)], param(0)),
+        sym::simd_fma | sym::simd_relaxed_fma => {
+            (1, 0, vec![param(0), param(0), param(0)], param(0))
+        }
+        sym::simd_gather => (3, 0, vec![param(0), param(1), param(2)], param(0)),
+        sym::simd_masked_load => (3, 0, vec![param(0), param(1), param(2)], param(2)),
+        sym::simd_masked_store => (3, 0, vec![param(0), param(1), param(2)], tcx.types.unit),
+        sym::simd_scatter => (3, 0, vec![param(0), param(1), param(2)], tcx.types.unit),
+        sym::simd_insert | sym::simd_insert_dyn => {
+            (2, 0, vec![param(0), tcx.types.u32, param(1)], param(0))
+        }
+        sym::simd_extract | sym::simd_extract_dyn => {
+            (2, 0, vec![param(0), tcx.types.u32], param(1))
+        }
+        sym::simd_cast
+        | sym::simd_as
+        | sym::simd_cast_ptr
+        | sym::simd_expose_provenance
+        | sym::simd_with_exposed_provenance => (2, 0, vec![param(0)], param(1)),
+        sym::simd_bitmask => (2, 0, vec![param(0)], param(1)),
+        sym::simd_select | sym::simd_select_bitmask => {
+            (2, 0, vec![param(0), param(1), param(1)], param(1))
+        }
+        sym::simd_reduce_all | sym::simd_reduce_any => (1, 0, vec![param(0)], tcx.types.bool),
+        sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => {
+            (2, 0, vec![param(0), param(1)], param(1))
+        }
+        sym::simd_reduce_add_unordered
+        | sym::simd_reduce_mul_unordered
+        | sym::simd_reduce_and
+        | sym::simd_reduce_or
+        | sym::simd_reduce_xor
+        | sym::simd_reduce_min
+        | sym::simd_reduce_max => (2, 0, vec![param(0)], param(1)),
+        sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
+        sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)),
+
+        sym::atomic_cxchg | sym::atomic_cxchgweak => (
+            1,
+            2,
+            vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
+            Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
+        ),
+        sym::atomic_load => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
+        sym::atomic_store => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
+
+        sym::atomic_xchg
+        | sym::atomic_xadd
+        | sym::atomic_xsub
+        | sym::atomic_and
+        | sym::atomic_nand
+        | sym::atomic_or
+        | sym::atomic_xor
+        | sym::atomic_max
+        | sym::atomic_min
+        | sym::atomic_umax
+        | sym::atomic_umin => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
+        sym::atomic_fence | sym::atomic_singlethreadfence => (0, 1, Vec::new(), tcx.types.unit),
+
+        other => {
+            tcx.dcx().emit_err(UnrecognizedIntrinsicFunction { span, name: other });
+            return;
+        }
     };
     let sig = tcx.mk_fn_sig(inputs, output, false, safety, ExternAbi::Rust);
     let sig = ty::Binder::bind_with_vars(sig, bound_vars);
diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs
index 152714b3407..a27d1ed6c53 100644
--- a/compiler/rustc_hir_analysis/src/errors.rs
+++ b/compiler/rustc_hir_analysis/src/errors.rs
@@ -162,15 +162,6 @@ pub(crate) enum AssocItemNotFoundSugg<'a> {
 }
 
 #[derive(Diagnostic)]
-#[diag(hir_analysis_unrecognized_atomic_operation, code = E0092)]
-pub(crate) struct UnrecognizedAtomicOperation<'a> {
-    #[primary_span]
-    #[label]
-    pub span: Span,
-    pub op: &'a str,
-}
-
-#[derive(Diagnostic)]
 #[diag(hir_analysis_wrong_number_of_generic_arguments_to_intrinsic, code = E0094)]
 pub(crate) struct WrongNumberOfGenericArgumentsToIntrinsic<'a> {
     #[primary_span]
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 4e842a8f93a..d66f98871b9 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -515,8 +515,24 @@ symbols! {
         async_iterator_poll_next,
         async_trait_bounds,
         atomic,
+        atomic_and,
+        atomic_cxchg,
+        atomic_cxchgweak,
+        atomic_fence,
         atomic_load,
+        atomic_max,
+        atomic_min,
         atomic_mod,
+        atomic_nand,
+        atomic_or,
+        atomic_singlethreadfence,
+        atomic_store,
+        atomic_umax,
+        atomic_umin,
+        atomic_xadd,
+        atomic_xchg,
+        atomic_xor,
+        atomic_xsub,
         atomics,
         att_syntax,
         attr,