author     Ralf Jung <post@ralfj.de>  2025-05-24 16:35:20 +0200
committer  Ralf Jung <post@ralfj.de>  2025-05-28 22:57:55 +0200
commit     4794ea176be0d61f3ac08c367971c032e7abe7af
tree       41f4d9eeb2185a14f67ef01baf65b804f463028d
parent     6f69710780d579b180ab38da4c1384d630f7bd31
atomic_load intrinsic: use const generic parameter for ordering
 compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 3
 compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 93
 compiler/rustc_hir_analysis/src/check/intrinsic.rs | 13
 compiler/rustc_middle/src/ty/consts/int.rs | 34
 compiler/rustc_middle/src/ty/mod.rs | 4
 compiler/rustc_span/src/symbol.rs | 1
 library/core/src/intrinsics/mod.rs | 30
 library/core/src/sync/atomic.rs | 18
 src/tools/miri/src/intrinsics/atomic.rs | 21
 src/tools/miri/src/intrinsics/mod.rs | 2
 src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.rs | 3
 src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.stderr | 4
 tests/ui/intrinsics/intrinsic-atomics.rs | 6
 tests/ui/intrinsics/non-integer-atomic.rs | 16
 tests/ui/intrinsics/non-integer-atomic.stderr | 24
 15 files changed, 198 insertions, 74 deletions
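
In short: the suffixed `atomic_load_seqcst`/`atomic_load_acquire`/`atomic_load_relaxed` intrinsics are folded into a single `atomic_load` intrinsic that takes the memory ordering as a const generic parameter. A minimal caller-side sketch of the new shape (nightly-only; the function name and values are made up for illustration, the call syntax mirrors the test updates further down):

    #![feature(core_intrinsics)]
    use std::intrinsics::{self, AtomicOrdering};

    // Before this patch this would have been `intrinsics::atomic_load_acquire(p)`.
    unsafe fn load_acquire(p: *const u32) -> u32 {
        intrinsics::atomic_load::<u32, { AtomicOrdering::Acquire }>(p)
    }

    fn main() {
        let x = 1u32;
        // SAFETY: `&x` is a valid, suitably aligned pointer for the duration of the load.
        assert_eq!(unsafe { load_acquire(&x) }, 1);
    }
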
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index b21ca32c9a2..0de23e55e81 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -870,11 +870,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
             // FIXME use a compiler fence once Cranelift supports it
             fx.bcx.ins().fence();
         }
-        _ if intrinsic.as_str().starts_with("atomic_load") => {
+        sym::atomic_load => {
             intrinsic_args!(fx, args => (ptr); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
             let ty = generic_args.type_at(0);
+            let _ord = generic_args.const_at(1).to_value(); // FIXME: forward this to cranelift once they support that
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                     // FIXME implement 128bit atomics
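
For readers unfamiliar with how a backend sees const generics: `generic_args` carries both the type and the const argument of the monomorphized intrinsic instance, so the ordering is available here even though it is not used yet. Roughly (illustrative trace, not literal compiler output):

    // For intrinsics::atomic_load::<u64, { AtomicOrdering::Relaxed }>(p):
    //   generic_args.type_at(0)  -> u64
    //   generic_args.const_at(1) -> the AtomicOrdering::Relaxed const argument
    // Cranelift currently discards the decoded value (`_ord`); the FIXME above
    // tracks forwarding the ordering once Cranelift can express it.
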
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index a6d159c51e1..1047082df42 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -99,6 +99,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
 
+        let ret_llval = |bx: &mut Bx, llval| {
+            if result.layout.ty.is_bool() {
+                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+                    .val
+                    .store(bx, result);
+            } else if !result.layout.ty.is_unit() {
+                bx.store_to_place(llval, result.val);
+            }
+            Ok(())
+        };
+
         let llval = match name {
             sym::abort => {
                 bx.abort();
@@ -337,6 +348,53 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 use crate::common::AtomicOrdering::*;
                 use crate::common::{AtomicRmwBinOp, SynchronizationScope};
 
+                let invalid_monomorphization = |ty| {
+                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                        span,
+                        name,
+                        ty,
+                    });
+                };
+
+                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
+                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
+                    let ord = discr.to_atomic_ordering();
+                    // We have to translate from the intrinsic ordering to the backend ordering.
+                    use rustc_middle::ty::AtomicOrdering;
+                    match ord {
+                        AtomicOrdering::Relaxed => Relaxed,
+                        AtomicOrdering::Release => Release,
+                        AtomicOrdering::Acquire => Acquire,
+                        AtomicOrdering::AcqRel => AcquireRelease,
+                        AtomicOrdering::SeqCst => SequentiallyConsistent,
+                    }
+                };
+
+                // Some intrinsics have the ordering already converted to a const generic parameter, we handle those first.
+                match name {
+                    sym::atomic_load => {
+                        let ty = fn_args.type_at(0);
+                        let ordering = fn_args.const_at(1).to_value();
+                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                            invalid_monomorphization(ty);
+                            return Ok(());
+                        }
+                        let layout = bx.layout_of(ty);
+                        let source = args[0].immediate();
+                        let llval = bx.atomic_load(
+                            bx.backend_type(layout),
+                            source,
+                            parse_const_generic_ordering(ordering),
+                            layout.size,
+                        );
+
+                        return ret_llval(bx, llval);
+                    }
+
+                    // The rest falls back to below.
+                    _ => {}
+                }
+
                 let Some((instruction, ordering)) = atomic.split_once('_') else {
                     bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                 };
@@ -350,14 +408,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                 };
 
-                let invalid_monomorphization = |ty| {
-                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
-                        span,
-                        name,
-                        ty,
-                    });
-                };
-
                 match instruction {
                     "cxchg" | "cxchgweak" => {
                         let Some((success, failure)) = ordering.split_once('_') else {
@@ -390,24 +440,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         return Ok(());
                     }
 
-                    "load" => {
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let layout = bx.layout_of(ty);
-                            let size = layout.size;
-                            let source = args[0].immediate();
-                            bx.atomic_load(
-                                bx.backend_type(layout),
-                                source,
-                                parse_ordering(bx, ordering),
-                                size,
-                            )
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-
                     "store" => {
                         let ty = fn_args.type_at(0);
                         if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
@@ -538,14 +570,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };
 
-        if result.layout.ty.is_bool() {
-            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
-                .val
-                .store(bx, result);
-        } else if !result.layout.ty.is_unit() {
-            bx.store_to_place(llval, result.val);
-        }
-        Ok(())
+        ret_llval(bx, llval)
     }
 }
 
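
Because the new intrinsic name carries no ordering suffix, it is dispatched before the `split_once('_')` string parsing that the remaining atomic intrinsics still use. A rough trace of one monomorphization, using the discriminant values defined elsewhere in this patch (illustrative, not literal compiler output):

    // For intrinsics::atomic_load::<u32, { AtomicOrdering::Acquire }>(p):
    //   fn_args.type_at(0)             -> u32 (must be an integer or raw pointer,
    //                                     otherwise InvalidMonomorphization is emitted)
    //   fn_args.const_at(1).to_value() -> a ty::Value whose valtree branch holds a
    //                                     single leaf, 2, i.e. AtomicOrdering::Acquire
    //   parse_const_generic_ordering   -> the backend's `Acquire` ordering
    //   bx.atomic_load(...)            -> the actual atomic load instruction
    // The result is stored through the new `ret_llval` helper, so the early return
    // gets the same bool/unit handling as the fall-through path at the end.
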
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 9fd158ad154..54bb3ac4113 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -204,24 +204,25 @@ pub(crate) fn check_intrinsic_type(
 
         // Each atomic op has variants with different suffixes (`_seq_cst`, `_acquire`, etc.). Use
         // string ops to strip the suffixes, because the variants all get the same treatment here.
-        let (n_tps, inputs, output) = match split[1] {
+        let (n_tps, n_cts, inputs, output) = match split[1] {
             "cxchg" | "cxchgweak" => (
                 1,
+                0,
                 vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
                 Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
             ),
-            "load" => (1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
-            "store" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
+            "load" => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
+            "store" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
 
             "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
-            | "umin" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
-            "fence" | "singlethreadfence" => (0, Vec::new(), tcx.types.unit),
+            | "umin" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
+            "fence" | "singlethreadfence" => (0, 0, Vec::new(), tcx.types.unit),
             op => {
                 tcx.dcx().emit_err(UnrecognizedAtomicOperation { span, op });
                 return;
             }
         };
-        (n_tps, 0, 0, inputs, output, hir::Safety::Unsafe)
+        (n_tps, 0, n_cts, inputs, output, hir::Safety::Unsafe)
     } else if intrinsic_name == sym::contract_check_ensures {
         // contract_check_ensures::<Ret, C>(Ret, C) -> Ret
         // where C: for<'a> Fn(&'a Ret) -> bool,
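
In plain terms, the extra tuple slot means the type checker now expects `atomic_load` to have one type parameter and one const parameter, matching the library declaration added later in this patch:

    // n_tps = 1 (the `T`), n_cts = 1 (the `ORD` const generic):
    // pub unsafe fn atomic_load<T: Copy, const ORD: AtomicOrdering>(src: *const T) -> T;
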
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index 9c9cd695339..0383814cc96 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -26,6 +26,19 @@ impl ConstInt {
     }
 }
 
+/// An enum to represent the compiler-side view of `intrinsics::AtomicOrdering`.
+/// This lives here because there's a method in this file that needs it and it is entirely unclear
+/// where else to put this...
+#[derive(Debug)]
+pub enum AtomicOrdering {
+    // These values must match `intrinsics::AtomicOrdering`!
+    Relaxed = 0,
+    Release = 1,
+    Acquire = 2,
+    AcqRel = 3,
+    SeqCst = 4,
+}
+
 impl std::fmt::Debug for ConstInt {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let Self { int, signed, is_ptr_sized_integral } = *self;
@@ -318,6 +331,25 @@ impl ScalarInt {
         self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap()
     }
 
+    #[inline]
+    pub fn to_atomic_ordering(self) -> AtomicOrdering {
+        use AtomicOrdering::*;
+        let val = self.to_u32();
+        if val == Relaxed as u32 {
+            Relaxed
+        } else if val == Release as u32 {
+            Release
+        } else if val == Acquire as u32 {
+            Acquire
+        } else if val == AcqRel as u32 {
+            AcqRel
+        } else if val == SeqCst as u32 {
+            SeqCst
+        } else {
+            panic!("not a valid atomic ordering")
+        }
+    }
+
     /// Converts the `ScalarInt` to `bool`.
     /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte.
     /// Errors if it is not a valid `bool`.
@@ -488,7 +520,7 @@ from_scalar_int_for_x_signed!(i8, i16, i32, i64, i128);
 impl From<std::cmp::Ordering> for ScalarInt {
     #[inline]
     fn from(c: std::cmp::Ordering) -> Self {
-        // Here we rely on `Ordering` having the same values in host and target!
+        // Here we rely on `cmp::Ordering` having the same values in host and target!
         ScalarInt::from(c as i8)
     }
 }
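
As a self-contained sketch (not the compiler's code), the discriminant round-trip that `ScalarInt::to_atomic_ordering` performs looks like the following; the compiler version spells it as an `if`/`else` chain comparing against `Variant as u32`, so the numeric values are written down only once, in the enum itself:

    #[derive(Debug, PartialEq)]
    enum AtomicOrdering { Relaxed = 0, Release = 1, Acquire = 2, AcqRel = 3, SeqCst = 4 }

    // Decode a discriminant read back out of a const generic argument.
    fn from_discriminant(val: u32) -> AtomicOrdering {
        use AtomicOrdering::*;
        match val {
            0 => Relaxed,
            1 => Release,
            2 => Acquire,
            3 => AcqRel,
            4 => SeqCst,
            _ => panic!("not a valid atomic ordering"),
        }
    }

    fn main() {
        assert_eq!(from_discriminant(AtomicOrdering::Acquire as u32), AtomicOrdering::Acquire);
    }
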
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 78c0812b08f..af31f7ed33b 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -74,8 +74,8 @@ pub use self::closure::{
     place_to_string_for_capture,
 };
 pub use self::consts::{
-    AnonConstKind, Const, ConstInt, ConstKind, Expr, ExprKind, ScalarInt, UnevaluatedConst,
-    ValTree, ValTreeKind, Value,
+    AnonConstKind, AtomicOrdering, Const, ConstInt, ConstKind, Expr, ExprKind, ScalarInt,
+    UnevaluatedConst, ValTree, ValTreeKind, Value,
 };
 pub use self::context::{
     CtxtInterners, CurrentGcx, DeducedParamAttrs, Feed, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt,
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 8f7e72f0ae1..cabd43ebbdc 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -515,6 +515,7 @@ symbols! {
         async_iterator_poll_next,
         async_trait_bounds,
         atomic,
+        atomic_load,
         atomic_mod,
         atomics,
         att_syntax,
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index 32642a13b42..8a0f80f8ce7 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -30,7 +30,7 @@
 //!
 //! The atomic intrinsics provide common atomic operations on machine
 //! words, with multiple possible memory orderings. See the
-//! [atomic types][crate::sync::atomic] docs for details.
+//! [atomic types][atomic] docs for details.
 //!
 //! # Unwinding
 //!
@@ -50,7 +50,7 @@
 )]
 #![allow(missing_docs)]
 
-use crate::marker::{DiscriminantKind, Tuple};
+use crate::marker::{ConstParamTy, DiscriminantKind, Tuple};
 use crate::ptr;
 
 pub mod fallback;
@@ -62,6 +62,20 @@ pub mod simd;
 #[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
 use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};
 
+/// A type for atomic ordering parameters for intrinsics. This is a separate type from
+/// `atomic::Ordering` so that we can make it `ConstParamTy` and fix the values used here without a
+/// risk of leaking that to stable code.
+#[derive(Debug, ConstParamTy, PartialEq, Eq)]
+pub enum AtomicOrdering {
+    // These values must match the compiler's `AtomicOrdering` defined in
+    // `rustc_middle/src/ty/consts/int.rs`!
+    Relaxed = 0,
+    Release = 1,
+    Acquire = 2,
+    AcqRel = 3,
+    SeqCst = 4,
+}
+
 // N.B., these intrinsics take raw pointers because they mutate aliased
 // memory, which is not valid for either `&` or `&mut`.
 
@@ -395,10 +409,20 @@ pub unsafe fn atomic_cxchgweak_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src:
 /// `T` must be an integer or pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
+/// [`atomic`] types via the `load` method. For example, [`AtomicBool::load`].
+#[rustc_intrinsic]
+#[rustc_nounwind]
+#[cfg(not(bootstrap))]
+pub unsafe fn atomic_load<T: Copy, const ORD: AtomicOrdering>(src: *const T) -> T;
+/// Loads the current value of the pointer.
+/// `T` must be an integer or pointer type.
+///
+/// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `load` method by passing
 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
+#[cfg(bootstrap)]
 pub unsafe fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
 /// Loads the current value of the pointer.
 /// `T` must be an integer or pointer type.
@@ -408,6 +432,7 @@ pub unsafe fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
+#[cfg(bootstrap)]
 pub unsafe fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
 /// Loads the current value of the pointer.
 /// `T` must be an integer or pointer type.
@@ -417,6 +442,7 @@ pub unsafe fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
+#[cfg(bootstrap)]
 pub unsafe fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
 
 /// Stores the value at the specified memory location.
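
During the bootstrap window both spellings have to coexist: the stage0 compiler only understands the suffixed intrinsics, so those stay behind `#[cfg(bootstrap)]` while the const-generic one is `#[cfg(not(bootstrap))]`. Note also that call sites must brace the ordering (`{ AtomicOrdering::SeqCst }`), since a multi-segment path is not accepted as a bare const generic argument. Schematically (not the literal library code):

    // Library built by the stage0 compiler (pre-patch): old names only.
    //   #[cfg(bootstrap)]
    //   pub unsafe fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
    //
    // Library built by a compiler containing this patch: one intrinsic,
    // with the ordering as a const generic.
    //   #[cfg(not(bootstrap))]
    //   pub unsafe fn atomic_load<T: Copy, const ORD: AtomicOrdering>(src: *const T) -> T;
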
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index bd5a58d74ba..a3fbc71162b 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -3822,6 +3822,7 @@ unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
 
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[cfg(bootstrap)]
 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_load`.
     unsafe {
@@ -3836,6 +3837,23 @@ unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
 }
 
 #[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[cfg(not(bootstrap))]
+unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
+    use intrinsics::AtomicOrdering;
+    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
+    unsafe {
+        match order {
+            Relaxed => intrinsics::atomic_load::<T, { AtomicOrdering::Relaxed }>(dst),
+            Acquire => intrinsics::atomic_load::<T, { AtomicOrdering::Acquire }>(dst),
+            SeqCst => intrinsics::atomic_load::<T, { AtomicOrdering::SeqCst }>(dst),
+            Release => panic!("there is no such thing as a release load"),
+            AcqRel => panic!("there is no such thing as an acquire-release load"),
+        }
+    }
+}
+
+#[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
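
Nothing changes for users of the stable API: `Ordering` is still a runtime value, and the wrapper above selects the matching const generic in each arm. For example (stable code, unaffected by the bootstrap cfg):

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let x = AtomicU32::new(7);
        // After this patch, this lowers to
        // intrinsics::atomic_load::<u32, { AtomicOrdering::Acquire }>.
        assert_eq!(x.load(Ordering::Acquire), 7);
        // `x.load(Ordering::Release)` would panic:
        // "there is no such thing as a release load".
    }
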
diff --git a/src/tools/miri/src/intrinsics/atomic.rs b/src/tools/miri/src/intrinsics/atomic.rs
index 2eb8086f578..a61226eeed9 100644
--- a/src/tools/miri/src/intrinsics/atomic.rs
+++ b/src/tools/miri/src/intrinsics/atomic.rs
@@ -1,4 +1,5 @@
 use rustc_middle::mir::BinOp;
+use rustc_middle::ty::AtomicOrdering;
 use rustc_middle::{mir, ty};
 
 use self::helpers::check_intrinsic_arg_count;
@@ -19,6 +20,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn emulate_atomic_intrinsic(
         &mut self,
         intrinsic_name: &str,
+        generic_args: ty::GenericArgsRef<'tcx>,
         args: &[OpTy<'tcx>],
         dest: &MPlaceTy<'tcx>,
     ) -> InterpResult<'tcx, EmulateItemResult> {
@@ -35,6 +37,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             }
         }
 
+        fn read_ord_const_generic(o: AtomicOrdering) -> AtomicReadOrd {
+            match o {
+                AtomicOrdering::SeqCst => AtomicReadOrd::SeqCst,
+                AtomicOrdering::Acquire => AtomicReadOrd::Acquire,
+                AtomicOrdering::Relaxed => AtomicReadOrd::Relaxed,
+                _ => panic!("invalid read ordering `{o:?}`"),
+            }
+        }
+
         fn write_ord(ord: &str) -> AtomicWriteOrd {
             match ord {
                 "seqcst" => AtomicWriteOrd::SeqCst,
@@ -66,7 +77,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         }
 
         match &*intrinsic_structure {
-            ["load", ord] => this.atomic_load(args, dest, read_ord(ord))?,
+            // New-style intrinsics that use const generics
+            ["load"] => {
+                let ordering = generic_args.const_at(1).to_value();
+                let ordering =
+                    ordering.valtree.unwrap_branch()[0].unwrap_leaf().to_atomic_ordering();
+                this.atomic_load(args, dest, read_ord_const_generic(ordering))?;
+            }
+
+            // Old-style intrinsics that have the ordering in the intrinsic name
             ["store", ord] => this.atomic_store(args, write_ord(ord))?,
 
             ["fence", ord] => this.atomic_fence_intrinsic(args, fence_ord(ord))?,
diff --git a/src/tools/miri/src/intrinsics/mod.rs b/src/tools/miri/src/intrinsics/mod.rs
index 69baa472cd6..581005bc9a1 100644
--- a/src/tools/miri/src/intrinsics/mod.rs
+++ b/src/tools/miri/src/intrinsics/mod.rs
@@ -97,7 +97,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let this = self.eval_context_mut();
 
         if let Some(name) = intrinsic_name.strip_prefix("atomic_") {
-            return this.emulate_atomic_intrinsic(name, args, dest);
+            return this.emulate_atomic_intrinsic(name, generic_args, args, dest);
         }
         if let Some(name) = intrinsic_name.strip_prefix("simd_") {
             return this.emulate_simd_intrinsic(name, generic_args, args, dest);
diff --git a/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.rs b/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.rs
index 29976836b0b..37c64c81944 100644
--- a/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.rs
+++ b/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.rs
@@ -1,5 +1,6 @@
 //@compile-flags: -Zmiri-symbolic-alignment-check -Cdebug-assertions=no
 #![feature(core_intrinsics)]
+use std::intrinsics;
 
 fn main() {
     // Do a 4-aligned u64 atomic access. That should be UB on all platforms,
@@ -7,7 +8,7 @@ fn main() {
     let z = [0u32; 2];
     let zptr = &z as *const _ as *const u64;
     unsafe {
-        ::std::intrinsics::atomic_load_seqcst(zptr);
+        intrinsics::atomic_load::<_, { intrinsics::AtomicOrdering::SeqCst }>(zptr);
         //~^ERROR: accessing memory with alignment 4, but alignment 8 is required
     }
 }
diff --git a/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.stderr b/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.stderr
index a9da740be1d..e0f9d011ce4 100644
--- a/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.stderr
+++ b/src/tools/miri/tests/fail/unaligned_pointers/atomic_unaligned.stderr
@@ -1,8 +1,8 @@
 error: Undefined Behavior: accessing memory with alignment ALIGN, but alignment ALIGN is required
   --> tests/fail/unaligned_pointers/atomic_unaligned.rs:LL:CC
    |
-LL |         ::std::intrinsics::atomic_load_seqcst(zptr);
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ accessing memory with alignment ALIGN, but alignment ALIGN is required
+LL |         intrinsics::atomic_load::<_, { intrinsics::AtomicOrdering::SeqCst }>(zptr);
+   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ accessing memory with alignment ALIGN, but alignment ALIGN is required
    |
    = help: this usually indicates that your program performed an invalid operation and caused Undefined Behavior
    = help: but due to `-Zmiri-symbolic-alignment-check`, alignment errors can also be false positives
diff --git a/tests/ui/intrinsics/intrinsic-atomics.rs b/tests/ui/intrinsics/intrinsic-atomics.rs
index 9127cc649e6..f96c6dc832e 100644
--- a/tests/ui/intrinsics/intrinsic-atomics.rs
+++ b/tests/ui/intrinsics/intrinsic-atomics.rs
@@ -1,14 +1,14 @@
 //@ run-pass
 #![feature(core_intrinsics)]
-use std::intrinsics as rusti;
+use std::intrinsics::{self as rusti, AtomicOrdering};
 
 pub fn main() {
     unsafe {
         let mut x: Box<_> = Box::new(1);
 
-        assert_eq!(rusti::atomic_load_seqcst(&*x), 1);
+        assert_eq!(rusti::atomic_load::<_, { AtomicOrdering::SeqCst }>(&*x), 1);
         *x = 5;
-        assert_eq!(rusti::atomic_load_acquire(&*x), 5);
+        assert_eq!(rusti::atomic_load::<_, { AtomicOrdering::Acquire }>(&*x), 5);
 
         rusti::atomic_store_seqcst(&mut *x, 3);
         assert_eq!(*x, 3);
diff --git a/tests/ui/intrinsics/non-integer-atomic.rs b/tests/ui/intrinsics/non-integer-atomic.rs
index 2d1d0882084..dd129e55945 100644
--- a/tests/ui/intrinsics/non-integer-atomic.rs
+++ b/tests/ui/intrinsics/non-integer-atomic.rs
@@ -4,7 +4,7 @@
 #![allow(warnings)]
 #![crate_type = "rlib"]
 
-use std::intrinsics;
+use std::intrinsics::{self, AtomicOrdering};
 
 #[derive(Copy, Clone)]
 pub struct Foo(i64);
@@ -12,8 +12,8 @@ pub type Bar = &'static Fn();
 pub type Quux = [u8; 100];
 
 pub unsafe fn test_bool_load(p: &mut bool, v: bool) {
-    intrinsics::atomic_load_seqcst(p);
-    //~^ ERROR `atomic_load_seqcst` intrinsic: expected basic integer type, found `bool`
+    intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `bool`
 }
 
 pub unsafe fn test_bool_store(p: &mut bool, v: bool) {
@@ -32,8 +32,8 @@ pub unsafe fn test_bool_cxchg(p: &mut bool, v: bool) {
 }
 
 pub unsafe fn test_Foo_load(p: &mut Foo, v: Foo) {
-    intrinsics::atomic_load_seqcst(p);
-    //~^ ERROR `atomic_load_seqcst` intrinsic: expected basic integer type, found `Foo`
+    intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `Foo`
 }
 
 pub unsafe fn test_Foo_store(p: &mut Foo, v: Foo) {
@@ -52,7 +52,7 @@ pub unsafe fn test_Foo_cxchg(p: &mut Foo, v: Foo) {
 }
 
 pub unsafe fn test_Bar_load(p: &mut Bar, v: Bar) {
-    intrinsics::atomic_load_seqcst(p);
+    intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
     //~^ ERROR expected basic integer type, found `&dyn Fn()`
 }
 
@@ -72,8 +72,8 @@ pub unsafe fn test_Bar_cxchg(p: &mut Bar, v: Bar) {
 }
 
 pub unsafe fn test_Quux_load(p: &mut Quux, v: Quux) {
-    intrinsics::atomic_load_seqcst(p);
-    //~^ ERROR `atomic_load_seqcst` intrinsic: expected basic integer type, found `[u8; 100]`
+    intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
 }
 
 pub unsafe fn test_Quux_store(p: &mut Quux, v: Quux) {
diff --git a/tests/ui/intrinsics/non-integer-atomic.stderr b/tests/ui/intrinsics/non-integer-atomic.stderr
index 32791a8e8b7..58c2dc00c66 100644
--- a/tests/ui/intrinsics/non-integer-atomic.stderr
+++ b/tests/ui/intrinsics/non-integer-atomic.stderr
@@ -1,8 +1,8 @@
-error[E0511]: invalid monomorphization of `atomic_load_seqcst` intrinsic: expected basic integer type, found `bool`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `bool`
   --> $DIR/non-integer-atomic.rs:15:5
    |
-LL |     intrinsics::atomic_load_seqcst(p);
-   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL |     intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 error[E0511]: invalid monomorphization of `atomic_store_seqcst` intrinsic: expected basic integer type, found `bool`
   --> $DIR/non-integer-atomic.rs:20:5
@@ -22,11 +22,11 @@ error[E0511]: invalid monomorphization of `atomic_cxchg_seqcst_seqcst` intrinsic
 LL |     intrinsics::atomic_cxchg_seqcst_seqcst(p, v, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_load_seqcst` intrinsic: expected basic integer type, found `Foo`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `Foo`
   --> $DIR/non-integer-atomic.rs:35:5
    |
-LL |     intrinsics::atomic_load_seqcst(p);
-   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL |     intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 error[E0511]: invalid monomorphization of `atomic_store_seqcst` intrinsic: expected basic integer type, found `Foo`
   --> $DIR/non-integer-atomic.rs:40:5
@@ -46,11 +46,11 @@ error[E0511]: invalid monomorphization of `atomic_cxchg_seqcst_seqcst` intrinsic
 LL |     intrinsics::atomic_cxchg_seqcst_seqcst(p, v, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_load_seqcst` intrinsic: expected basic integer type, found `&dyn Fn()`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `&dyn Fn()`
   --> $DIR/non-integer-atomic.rs:55:5
    |
-LL |     intrinsics::atomic_load_seqcst(p);
-   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL |     intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 error[E0511]: invalid monomorphization of `atomic_store_seqcst` intrinsic: expected basic integer type, found `&dyn Fn()`
   --> $DIR/non-integer-atomic.rs:60:5
@@ -70,11 +70,11 @@ error[E0511]: invalid monomorphization of `atomic_cxchg_seqcst_seqcst` intrinsic
 LL |     intrinsics::atomic_cxchg_seqcst_seqcst(p, v, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_load_seqcst` intrinsic: expected basic integer type, found `[u8; 100]`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
   --> $DIR/non-integer-atomic.rs:75:5
    |
-LL |     intrinsics::atomic_load_seqcst(p);
-   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL |     intrinsics::atomic_load::<_, { AtomicOrdering::SeqCst }>(p);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 error[E0511]: invalid monomorphization of `atomic_store_seqcst` intrinsic: expected basic integer type, found `[u8; 100]`
   --> $DIR/non-integer-atomic.rs:80:5