Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 28 |
| -rw-r--r-- | compiler/rustc_codegen_ssa/messages.ftl | 8 |
| -rw-r--r-- | compiler/rustc_codegen_ssa/src/errors.rs | 16 |
| -rw-r--r-- | compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 322 |
| -rw-r--r-- | compiler/rustc_error_codes/src/error_codes/E0092.md | 15 |
| -rw-r--r-- | compiler/rustc_error_codes/src/error_codes/E0093.md | 17 |
| -rw-r--r-- | compiler/rustc_error_codes/src/error_codes/E0622.md | 4 |
| -rw-r--r-- | compiler/rustc_hir_analysis/messages.ftl | 4 |
| -rw-r--r-- | compiler/rustc_hir_analysis/src/check/intrinsic.rs | 59 |
| -rw-r--r-- | compiler/rustc_hir_analysis/src/errors.rs | 9 |
| -rw-r--r-- | compiler/rustc_span/src/symbol.rs | 16 |
11 files changed, 208 insertions, 290 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 27a5df8b152..a0f96d85dc3 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -875,7 +875,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let ptr = ptr.load_scalar(fx);
             let ty = generic_args.type_at(0);
-            let _ord = generic_args.const_at(1).to_value();
             // FIXME: forward this to cranelift once they support that
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                     // FIXME implement 128bit atomics
@@ -906,7 +905,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let val = CValue::by_val(val, fx.layout_of(ty));
             ret.write_cvalue(fx, val);
         }
-        _ if intrinsic.as_str().starts_with("atomic_store") => {
+        sym::atomic_store => {
             intrinsic_args!(fx, args => (ptr, val); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -939,7 +938,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+        sym::atomic_xchg => {
             intrinsic_args!(fx, args => (ptr, new); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -960,8 +959,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
-            // both atomic_cxchg_* and atomic_cxchgweak_*
+        sym::atomic_cxchg | sym::atomic_cxchgweak => {
             intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -984,7 +982,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             ret.write_cvalue(fx, ret_val)
         }
-        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+        sym::atomic_xadd => {
             intrinsic_args!(fx, args => (ptr, amount); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1006,7 +1004,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+        sym::atomic_xsub => {
             intrinsic_args!(fx, args => (ptr, amount); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1028,7 +1026,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_and") => {
+        sym::atomic_and => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1049,7 +1047,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_or") => {
+        sym::atomic_or => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1070,7 +1068,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xor") => {
+        sym::atomic_xor => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1091,7 +1089,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_nand") => {
+        sym::atomic_nand => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1112,7 +1110,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_max") => {
+        sym::atomic_max => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1133,7 +1131,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_umax") => {
+        sym::atomic_umax => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1154,7 +1152,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_min") => {
+        sym::atomic_min => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
@@ -1175,7 +1173,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_umin") => {
+        sym::atomic_umin => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);
 
             let ptr = ptr.load_scalar(fx);
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index acb4cbaa13f..91f6af7fb93 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -8,8 +8,6 @@ codegen_ssa_aix_strip_not_used = using host's `strip` binary to cross-compile to
 
 codegen_ssa_archive_build_failure = failed to build archive at `{$path}`: {$error}
 
-codegen_ssa_atomic_compare_exchange = Atomic compare-exchange intrinsic missing failure memory ordering
-
 codegen_ssa_autodiff_without_lto = using the autodiff feature requires using fat-lto
 
 codegen_ssa_bare_instruction_set = `#[instruction_set]` requires an argument
@@ -206,8 +204,6 @@ codegen_ssa_missing_cpp_build_tool_component = or a necessary component may be m
 
 codegen_ssa_missing_features = add the missing features in a `target_feature` attribute
 
-codegen_ssa_missing_memory_ordering = Atomic intrinsic missing memory ordering
-
 codegen_ssa_missing_query_depgraph =
     found CGU-reuse attribute but `-Zquery-dep-graph` was not specified
 
@@ -374,10 +370,6 @@ codegen_ssa_unexpected_parameter_name = unexpected parameter name
 
 codegen_ssa_unknown_archive_kind = Don't know how to build archive of type: {$kind}
 
-codegen_ssa_unknown_atomic_operation = unknown atomic operation
-
-codegen_ssa_unknown_atomic_ordering = unknown ordering in atomic intrinsic
-
 codegen_ssa_unknown_reuse_kind = unknown cgu-reuse-kind `{$kind}` specified
 
 codegen_ssa_unsupported_instruction_set = target does not support `#[instruction_set]`
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 572d7b1e06a..f843347db92 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -797,22 +797,6 @@ pub(crate) struct ShuffleIndicesEvaluation {
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_ssa_missing_memory_ordering)]
-pub(crate) struct MissingMemoryOrdering;
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_unknown_atomic_ordering)]
-pub(crate) struct UnknownAtomicOrdering;
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_atomic_compare_exchange)]
-pub(crate) struct AtomicCompareExchange;
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_unknown_atomic_operation)]
-pub(crate) struct UnknownAtomicOperation;
-
-#[derive(Diagnostic)]
 pub enum InvalidMonomorphization<'tcx> {
     #[diag(codegen_ssa_invalid_monomorphization_basic_integer_type, code = E0511)]
     BasicIntegerType {
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8c6f52084c2..a3f09f64a3e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -8,9 +8,10 @@ use rustc_span::sym;
 use super::FunctionCx;
 use super::operand::OperandRef;
 use super::place::PlaceRef;
+use crate::common::{AtomicRmwBinOp, SynchronizationScope};
 use crate::errors::InvalidMonomorphization;
 use crate::traits::*;
-use crate::{MemFlags, errors, meth, size_of_val};
+use crate::{MemFlags, meth, size_of_val};
 
 fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
@@ -62,7 +63,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let span = source_info.span;
 
         let name = bx.tcx().item_name(instance.def_id());
-        let name_str = name.as_str();
         let fn_args = instance.args;
 
         // If we're swapping something that's *not* an `OperandValue::Ref`,
@@ -89,14 +89,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         }
 
-        let ret_llval = |bx: &mut Bx, llval| {
-            if result.layout.ty.is_bool() {
-                let val = bx.from_immediate(llval);
-                bx.store_to_place(val, result.val);
-            } else if !result.layout.ty.is_unit() {
-                bx.store_to_place(llval, result.val);
-            }
-            Ok(())
+        let invalid_monomorphization_int_type = |ty| {
+            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
+        };
+
+        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
+            let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
+            discr.to_atomic_ordering()
         };
 
         let llval = match name {
@@ -336,184 +335,145 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
 
-            // This requires that atomic intrinsics follow a specific naming pattern:
-            // "atomic_<operation>[_<ordering>]"
-            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
-                use rustc_middle::ty::AtomicOrdering::*;
-
-                use crate::common::{AtomicRmwBinOp, SynchronizationScope};
+            sym::atomic_load => {
+                let ty = fn_args.type_at(0);
+                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+                let ordering = fn_args.const_at(1).to_value();
+                let layout = bx.layout_of(ty);
+                let source = args[0].immediate();
+                bx.atomic_load(
+                    bx.backend_type(layout),
+                    source,
+                    parse_atomic_ordering(ordering),
+                    layout.size,
+                )
+            }
+            sym::atomic_store => {
+                let ty = fn_args.type_at(0);
+                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+                let ordering = fn_args.const_at(1).to_value();
+                let size = bx.layout_of(ty).size;
+                let val = args[1].immediate();
+                let ptr = args[0].immediate();
+                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
+                return Ok(());
+            }
+            sym::atomic_cxchg | sym::atomic_cxchgweak => {
+                let ty = fn_args.type_at(0);
+                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+                let succ_ordering = fn_args.const_at(1).to_value();
+                let fail_ordering = fn_args.const_at(2).to_value();
+                let weak = name == sym::atomic_cxchgweak;
+                let dst = args[0].immediate();
+                let cmp = args[1].immediate();
+                let src = args[2].immediate();
+                let (val, success) = bx.atomic_cmpxchg(
+                    dst,
+                    cmp,
+                    src,
+                    parse_atomic_ordering(succ_ordering),
+                    parse_atomic_ordering(fail_ordering),
+                    weak,
+                );
+                let val = bx.from_immediate(val);
+                let success = bx.from_immediate(success);
 
-                let invalid_monomorphization = |ty| {
-                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
-                        span,
-                        name,
-                        ty,
-                    });
-                };
+                let dest = result.project_field(bx, 0);
+                bx.store_to_place(val, dest.val);
+                let dest = result.project_field(bx, 1);
+                bx.store_to_place(success, dest.val);
 
-                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
-                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
-                    discr.to_atomic_ordering()
+                return Ok(());
+            }
+            // These are all AtomicRMW ops
+            sym::atomic_max | sym::atomic_min => {
+                let atom_op = if name == sym::atomic_max {
+                    AtomicRmwBinOp::AtomicMax
+                } else {
+                    AtomicRmwBinOp::AtomicMin
                 };
 
-                // Some intrinsics have the ordering already converted to a const generic parameter, we handle those first.
-                match name {
-                    sym::atomic_load => {
-                        let ty = fn_args.type_at(0);
-                        let ordering = fn_args.const_at(1).to_value();
-                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                        let layout = bx.layout_of(ty);
-                        let source = args[0].immediate();
-                        let llval = bx.atomic_load(
-                            bx.backend_type(layout),
-                            source,
-                            parse_const_generic_ordering(ordering),
-                            layout.size,
-                        );
-
-                        return ret_llval(bx, llval);
-                    }
-
-                    // The rest falls back to below.
-                    _ => {}
+                let ty = fn_args.type_at(0);
+                if matches!(ty.kind(), ty::Int(_)) {
+                    let ordering = fn_args.const_at(1).to_value();
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                } else {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
                 }
-
-                let Some((instruction, ordering)) = atomic.split_once('_') else {
-                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
+            }
+            sym::atomic_umax | sym::atomic_umin => {
+                let atom_op = if name == sym::atomic_umax {
+                    AtomicRmwBinOp::AtomicUMax
+                } else {
+                    AtomicRmwBinOp::AtomicUMin
                 };
 
-                let parse_ordering = |bx: &Bx, s| match s {
-                    "relaxed" => Relaxed,
-                    "acquire" => Acquire,
-                    "release" => Release,
-                    "acqrel" => AcqRel,
-                    "seqcst" => SeqCst,
-                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
+                let ty = fn_args.type_at(0);
+                if matches!(ty.kind(), ty::Uint(_)) {
+                    let ordering = fn_args.const_at(1).to_value();
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                } else {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
+                }
+            }
+            sym::atomic_xchg
+            | sym::atomic_xadd
+            | sym::atomic_xsub
+            | sym::atomic_and
+            | sym::atomic_nand
+            | sym::atomic_or
+            | sym::atomic_xor => {
+                let atom_op = match name {
+                    sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
+                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
+                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
+                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
+                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
+                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
+                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
+                    _ => unreachable!(),
                 };
 
-                match instruction {
-                    "cxchg" | "cxchgweak" => {
-                        let Some((success, failure)) = ordering.split_once('_') else {
-                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
-                        };
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let weak = instruction == "cxchgweak";
-                            let dst = args[0].immediate();
-                            let cmp = args[1].immediate();
-                            let src = args[2].immediate();
-                            let (val, success) = bx.atomic_cmpxchg(
-                                dst,
-                                cmp,
-                                src,
-                                parse_ordering(bx, success),
-                                parse_ordering(bx, failure),
-                                weak,
-                            );
-                            let val = bx.from_immediate(val);
-                            let success = bx.from_immediate(success);
-
-                            let dest = result.project_field(bx, 0);
-                            bx.store_to_place(val, dest.val);
-                            let dest = result.project_field(bx, 1);
-                            bx.store_to_place(success, dest.val);
-                        } else {
-                            invalid_monomorphization(ty);
-                        }
-                        return Ok(());
-                    }
-
-                    "store" => {
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let size = bx.layout_of(ty).size;
-                            let val = args[1].immediate();
-                            let ptr = args[0].immediate();
-                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
-                        } else {
-                            invalid_monomorphization(ty);
-                        }
-                        return Ok(());
-                    }
-
-                    "fence" => {
-                        bx.atomic_fence(
-                            parse_ordering(bx, ordering),
-                            SynchronizationScope::CrossThread,
-                        );
-                        return Ok(());
-                    }
-
-                    "singlethreadfence" => {
-                        bx.atomic_fence(
-                            parse_ordering(bx, ordering),
-                            SynchronizationScope::SingleThread,
-                        );
-                        return Ok(());
-                    }
-
-                    // These are all AtomicRMW ops
-                    "max" | "min" => {
-                        let atom_op = if instruction == "max" {
-                            AtomicRmwBinOp::AtomicMax
-                        } else {
-                            AtomicRmwBinOp::AtomicMin
-                        };
-
-                        let ty = fn_args.type_at(0);
-                        if matches!(ty.kind(), ty::Int(_)) {
-                            let ptr = args[0].immediate();
-                            let val = args[1].immediate();
-                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-                    "umax" | "umin" => {
-                        let atom_op = if instruction == "umax" {
-                            AtomicRmwBinOp::AtomicUMax
-                        } else {
-                            AtomicRmwBinOp::AtomicUMin
-                        };
-
-                        let ty = fn_args.type_at(0);
-                        if matches!(ty.kind(), ty::Uint(_)) {
-                            let ptr = args[0].immediate();
-                            let val = args[1].immediate();
-                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-                    op => {
-                        let atom_op = match op {
-                            "xchg" => AtomicRmwBinOp::AtomicXchg,
-                            "xadd" => AtomicRmwBinOp::AtomicAdd,
-                            "xsub" => AtomicRmwBinOp::AtomicSub,
-                            "and" => AtomicRmwBinOp::AtomicAnd,
-                            "nand" => AtomicRmwBinOp::AtomicNand,
-                            "or" => AtomicRmwBinOp::AtomicOr,
-                            "xor" => AtomicRmwBinOp::AtomicXor,
-                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
-                        };
-
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let ptr = args[0].immediate();
-                            let val = args[1].immediate();
-                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
+                let ty = fn_args.type_at(0);
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
+                    let ordering = fn_args.const_at(1).to_value();
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                } else {
+                    invalid_monomorphization_int_type(ty);
+                    return Ok(());
                 }
             }
+            sym::atomic_fence => {
+                let ordering = fn_args.const_at(0).to_value();
+                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
+                return Ok(());
+            }
+
+            sym::atomic_singlethreadfence => {
+                let ordering = fn_args.const_at(0).to_value();
+                bx.atomic_fence(
+                    parse_atomic_ordering(ordering),
+                    SynchronizationScope::SingleThread,
+                );
+                return Ok(());
+            }
 
             sym::nontemporal_store => {
                 let dst = args[0].deref(bx.cx());
@@ -556,7 +516,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };
 
-        ret_llval(bx, llval)
+        if result.layout.ty.is_bool() {
+            let val = bx.from_immediate(llval);
+            bx.store_to_place(val, result.val);
+        } else if !result.layout.ty.is_unit() {
+            bx.store_to_place(llval, result.val);
+        }
+        Ok(())
     }
 }
diff --git a/compiler/rustc_error_codes/src/error_codes/E0092.md b/compiler/rustc_error_codes/src/error_codes/E0092.md
index be459d040c2..9c63798ded7 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0092.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0092.md
@@ -1,8 +1,10 @@
+#### Note: this error code is no longer emitted by the compiler.
+
 An undefined atomic operation function was declared.
 
 Erroneous code example:
 
-```compile_fail,E0092
+```ignore (no longer emitted)
 #![feature(intrinsics)]
 #![allow(internal_features)]
 
@@ -12,13 +14,4 @@ unsafe fn atomic_foo(); // error: unrecognized atomic operation
 ```
 
 Please check you didn't make a mistake in the function's name. All intrinsic
-functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
-`library/core/src/intrinsics.rs` in the Rust source code. Example:
-
-```
-#![feature(intrinsics)]
-#![allow(internal_features)]
-
-#[rustc_intrinsic]
-unsafe fn atomic_fence_seqcst(); // ok!
-```
+functions are defined in `library/core/src/intrinsics` in the Rust source code.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0093.md b/compiler/rustc_error_codes/src/error_codes/E0093.md
index 9929a069927..3552c2db4cc 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0093.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0093.md
@@ -17,19 +17,4 @@ fn main() {
 ```
 
 Please check you didn't make a mistake in the function's name. All intrinsic
-functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
-`library/core/src/intrinsics.rs` in the Rust source code. Example:
-
-```
-#![feature(intrinsics)]
-#![allow(internal_features)]
-
-#[rustc_intrinsic]
-unsafe fn atomic_fence_seqcst(); // ok!
-
-fn main() {
-    unsafe {
-        atomic_fence_seqcst();
-    }
-}
-```
+functions are defined in `library/core/src/intrinsics` in the Rust source code.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0622.md b/compiler/rustc_error_codes/src/error_codes/E0622.md
index 9b8131a061e..cc66e067990 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0622.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0622.md
@@ -4,7 +4,7 @@ An intrinsic was declared without being a function.
 
 Erroneous code example:
 
-```no_run
+```ignore (no longer emitted)
 #![feature(intrinsics)]
 #![allow(internal_features)]
 
@@ -21,7 +21,7 @@ An intrinsic is a function available for use in a given programming language
 whose implementation is handled specially by the compiler. In order to fix this
 error, just declare a function. Example:
 
-```no_run
+```ignore (no longer emitted)
 #![feature(intrinsics)]
 #![allow(internal_features)]
 
diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl
index a3a0e276f74..4fcd9f8a646 100644
--- a/compiler/rustc_hir_analysis/messages.ftl
+++ b/compiler/rustc_hir_analysis/messages.ftl
@@ -565,10 +565,6 @@ hir_analysis_unconstrained_generic_parameter = the {$param_def_kind} `{$param_na
 hir_analysis_unconstrained_opaque_type = unconstrained opaque type
     .note = `{$name}` must be used in combination with a concrete type within the same {$what}
 
-hir_analysis_unrecognized_atomic_operation =
-    unrecognized atomic operation function: `{$op}`
-    .label = unrecognized atomic operation
-
 hir_analysis_unrecognized_intrinsic_function = unrecognized intrinsic function: `{$name}`
     .label = unrecognized intrinsic
 
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 234520c1583..d8080d2537b 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -9,10 +9,7 @@ use rustc_span::def_id::LocalDefId;
 use rustc_span::{Span, Symbol, sym};
 
 use crate::check::check_function_signature;
-use crate::errors::{
-    UnrecognizedAtomicOperation, UnrecognizedIntrinsicFunction,
-    WrongNumberOfGenericArgumentsToIntrinsic,
-};
+use crate::errors::{UnrecognizedIntrinsicFunction, WrongNumberOfGenericArgumentsToIntrinsic};
 
 fn equate_intrinsic_type<'tcx>(
     tcx: TyCtxt<'tcx>,
@@ -172,7 +169,6 @@ pub(crate) fn check_intrinsic_type(
             Ty::new_error_with_message(tcx, span, "expected param")
         }
     };
-    let name_str = intrinsic_name.as_str();
 
     let bound_vars = tcx.mk_bound_variable_kinds(&[
        ty::BoundVariableKind::Region(ty::BoundRegionKind::Anon),
@@ -198,32 +194,9 @@ pub(crate) fn check_intrinsic_type(
         (Ty::new_ref(tcx, env_region, va_list_ty, mutbl), va_list_ty)
     };
 
-    let (n_tps, n_lts, n_cts, inputs, output, safety) = if name_str.starts_with("atomic_") {
-        let split: Vec<&str> = name_str.split('_').collect();
-        assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");
-
-        // Each atomic op has variants with different suffixes (`_seq_cst`, `_acquire`, etc.). Use
-        // string ops to strip the suffixes, because the variants all get the same treatment here.
-        let (n_tps, n_cts, inputs, output) = match split[1] {
-            "cxchg" | "cxchgweak" => (
-                1,
-                0,
-                vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
-                Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
-            ),
-            "load" => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
-            "store" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
-
-            "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
-            | "umin" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
-            "fence" | "singlethreadfence" => (0, 0, Vec::new(), tcx.types.unit),
-            op => {
-                tcx.dcx().emit_err(UnrecognizedAtomicOperation { span, op });
-                return;
-            }
-        };
-        (n_tps, 0, n_cts, inputs, output, hir::Safety::Unsafe)
-    } else if intrinsic_name == sym::contract_check_ensures {
+    let (n_tps, n_lts, n_cts, inputs, output, safety) = if intrinsic_name
+        == sym::contract_check_ensures
+    {
         // contract_check_ensures::<Ret, C>(Ret, C) -> Ret
         // where C: for<'a> Fn(&'a Ret) -> bool,
         //
@@ -694,6 +667,30 @@ pub(crate) fn check_intrinsic_type(
         sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
         sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)),
 
+        sym::atomic_cxchg | sym::atomic_cxchgweak => (
+            1,
+            2,
+            vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
+            Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
+        ),
+        sym::atomic_load => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
+        sym::atomic_store => {
+            (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
+        }
+
+        sym::atomic_xchg
+        | sym::atomic_xadd
+        | sym::atomic_xsub
+        | sym::atomic_and
+        | sym::atomic_nand
+        | sym::atomic_or
+        | sym::atomic_xor
+        | sym::atomic_max
+        | sym::atomic_min
+        | sym::atomic_umax
+        | sym::atomic_umin => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
+        sym::atomic_fence | sym::atomic_singlethreadfence => (0, 1, Vec::new(), tcx.types.unit),
+
         other => {
             tcx.dcx().emit_err(UnrecognizedIntrinsicFunction { span, name: other });
             return;
diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs
index 152714b3407..a27d1ed6c53 100644
--- a/compiler/rustc_hir_analysis/src/errors.rs
+++ b/compiler/rustc_hir_analysis/src/errors.rs
@@ -162,15 +162,6 @@ pub(crate) enum AssocItemNotFoundSugg<'a> {
 }
 
 #[derive(Diagnostic)]
-#[diag(hir_analysis_unrecognized_atomic_operation, code = E0092)]
-pub(crate) struct UnrecognizedAtomicOperation<'a> {
-    #[primary_span]
-    #[label]
-    pub span: Span,
-    pub op: &'a str,
-}
-
-#[derive(Diagnostic)]
 #[diag(hir_analysis_wrong_number_of_generic_arguments_to_intrinsic, code = E0094)]
 pub(crate) struct WrongNumberOfGenericArgumentsToIntrinsic<'a> {
     #[primary_span]
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 4e842a8f93a..d66f98871b9 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -515,8 +515,24 @@ symbols! {
         async_iterator_poll_next,
         async_trait_bounds,
         atomic,
+        atomic_and,
+        atomic_cxchg,
+        atomic_cxchgweak,
+        atomic_fence,
         atomic_load,
+        atomic_max,
+        atomic_min,
         atomic_mod,
+        atomic_nand,
+        atomic_or,
+        atomic_singlethreadfence,
+        atomic_store,
+        atomic_umax,
+        atomic_umin,
+        atomic_xadd,
+        atomic_xchg,
+        atomic_xor,
+        atomic_xsub,
         atomics,
         att_syntax,
         attr,
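For orientation, the new signature checks above accept `atomic_load` with one type parameter and one const-generic ordering parameter instead of an ordering suffix baked into the name. Below is a minimal, hypothetical sketch of a matching declaration and call; the `AtomicOrdering` import path, the feature gates, and the `Copy` bound are assumptions for illustration and are not taken from this commit, so details may differ on a given nightly.

    // Hypothetical sketch only: mirrors the shape the new check_intrinsic_type
    // arm expects for `atomic_load` (1 type param, 1 const param, *const T -> T).
    // The import path and feature gates below are assumptions.
    #![feature(intrinsics, core_intrinsics, adt_const_params)]
    #![allow(internal_features)]

    use std::intrinsics::AtomicOrdering;

    #[rustc_intrinsic]
    unsafe fn atomic_load<T: Copy, const ORD: AtomicOrdering>(src: *const T) -> T;

    fn main() {
        let x = 42u32;
        // The memory ordering is passed as a const generic argument rather than
        // encoded in the intrinsic's name (e.g. the old `atomic_load_seqcst`).
        let v = unsafe { atomic_load::<u32, { AtomicOrdering::SeqCst }>(&x) };
        assert_eq!(v, 42);
    }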
