Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
23 files changed, 1163 insertions, 712 deletions
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs index ed78d4ef19f..14fc23593f0 100644 --- a/compiler/rustc_codegen_gcc/src/abi.rs +++ b/compiler/rustc_codegen_gcc/src/abi.rs @@ -4,8 +4,8 @@ use gccjit::{ToLValue, ToRValue, Type}; use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeCodegenMethods}; use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; -use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::Ty; +use rustc_middle::ty::layout::LayoutOf; #[cfg(feature = "master")] use rustc_session::config; use rustc_target::abi::call::{ArgAttributes, CastTarget, FnAbi, PassMode, Reg, RegKind}; diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs index deeb55e9d12..f13a75648ae 100644 --- a/compiler/rustc_codegen_gcc/src/allocator.rs +++ b/compiler/rustc_codegen_gcc/src/allocator.rs @@ -2,8 +2,8 @@ use gccjit::FnAttribute; use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type}; use rustc_ast::expand::allocator::{ - alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy, - ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE, + ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE, + alloc_error_handler_name, default_fn_name, global_fn_name, }; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; @@ -104,10 +104,17 @@ fn create_wrapper_function( false, ); - if tcx.sess.default_hidden_visibility() { - #[cfg(feature = "master")] - func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + #[cfg(feature = "master")] + match tcx.sess.default_visibility() { + rustc_target::spec::SymbolVisibility::Hidden => { + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)) + } + rustc_target::spec::SymbolVisibility::Protected => { + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Protected)) + } + rustc_target::spec::SymbolVisibility::Interposable => {} } + if tcx.sess.must_emit_unwind_tables() { // TODO(antoyo): emit unwind tables. 
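// A minimal illustrative sketch (not part of this diff) of the mapping the
// `default_visibility()` match above implements; the helper name
// `to_gcc_visibility` is ours:
#[cfg(feature = "master")]
fn to_gcc_visibility(vis: rustc_target::spec::SymbolVisibility) -> Option<gccjit::Visibility> {
    use rustc_target::spec::SymbolVisibility;
    match vis {
        SymbolVisibility::Hidden => Some(gccjit::Visibility::Hidden),
        SymbolVisibility::Protected => Some(gccjit::Visibility::Protected),
        // Interposable symbols keep the default visibility, so no attribute is added.
        SymbolVisibility::Interposable => None,
    }
}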
} diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs index 13a00f7e08d..6b067b35e71 100644 --- a/compiler/rustc_codegen_gcc/src/asm.rs +++ b/compiler/rustc_codegen_gcc/src/asm.rs @@ -654,7 +654,8 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f", InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) - | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => { unreachable!("clobber-only") } InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r", @@ -682,6 +683,13 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a", InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::S390x( + S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg, + ) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"), InlineAsmRegClass::Err => unreachable!(), }, }; @@ -724,7 +732,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) - | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => { unreachable!("clobber-only") } InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), @@ -757,6 +766,11 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr, ) => cx.type_i32(), InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg | S390xInlineAsmRegClass::areg) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"), InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(), @@ -936,6 +950,7 @@ fn modifier_to_gcc( }, InlineAsmRegClass::Avr(_) => None, InlineAsmRegClass::S390x(_) => None, + InlineAsmRegClass::Sparc(_) => None, InlineAsmRegClass::Msp430(_) => None, InlineAsmRegClass::M68k(_) => None, InlineAsmRegClass::CSKY(_) => None, diff --git a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs index 5fdf2680aac..d20e13e15b9 100644 --- a/compiler/rustc_codegen_gcc/src/attributes.rs +++ b/compiler/rustc_codegen_gcc/src/attributes.rs @@ -7,11 +7,9 @@ use rustc_attr::InstructionSetAttr; #[cfg(feature = "master")] use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::ty; -use rustc_span::symbol::sym; use crate::context::CodegenCx; -use 
crate::errors::TiedTargetFeatures; -use crate::gcc_util::{check_tied_features, to_gcc_features}; +use crate::gcc_util::to_gcc_features; /// Get GCC attribute for the provided inline heuristic. #[cfg(feature = "master")] @@ -72,26 +70,10 @@ pub fn from_fn_attrs<'gcc, 'tcx>( } } - let function_features = codegen_fn_attrs + let mut function_features = codegen_fn_attrs .target_features .iter() .map(|features| features.name.as_str()) - .collect::<Vec<&str>>(); - - if let Some(features) = check_tied_features( - cx.tcx.sess, - &function_features.iter().map(|features| (*features, true)).collect(), - ) { - let span = cx - .tcx - .get_attr(instance.def_id(), sym::target_feature) - .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span); - cx.tcx.dcx().create_err(TiedTargetFeatures { features: features.join(", "), span }).emit(); - return; - } - - let mut function_features = function_features - .iter() .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter()) .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match *x { InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature. diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index c2adab7137f..ed92f9c5241 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -27,7 +27,7 @@ use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModul use rustc_codegen_ssa::back::symbol_export; use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput}; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::memmap::Mmap; use rustc_errors::{DiagCtxtHandle, FatalError}; use rustc_hir::def_id::LOCAL_CRATE; @@ -35,11 +35,11 @@ use rustc_middle::bug; use rustc_middle::dep_graph::WorkProduct; use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel}; use rustc_session::config::{CrateType, Lto}; -use tempfile::{tempdir, TempDir}; +use tempfile::{TempDir, tempdir}; use crate::back::write::save_temp_bitcode; use crate::errors::{DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib}; -use crate::{to_gcc_opt_level, GccCodegenBackend, GccContext, SyncContext}; +use crate::{GccCodegenBackend, GccContext, SyncContext, to_gcc_opt_level}; /// We keep track of the computed LTO cache keys from the previous /// session to determine which CGUs we can reuse. diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs index c0443faf24a..18aa32754e1 100644 --- a/compiler/rustc_codegen_gcc/src/base.rs +++ b/compiler/rustc_codegen_gcc/src/base.rs @@ -19,7 +19,7 @@ use rustc_target::spec::PanicStrategy; use crate::builder::Builder; use crate::context::CodegenCx; -use crate::{gcc_util, new_context, GccContext, LockedTargetInfo, SyncContext}; +use crate::{GccContext, LockedTargetInfo, SyncContext, gcc_util, new_context}; #[cfg(feature = "master")] pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility { @@ -128,8 +128,19 @@ pub fn compile_codegen_unit( // NOTE: Rust relies on LLVM doing wrapping on overflow. 
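// (With GCC, `-fwrapv` gives signed integer overflow two's-complement
// wrapping semantics, matching the behavior rustc already relies on from LLVM.)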
context.add_command_line_option("-fwrapv"); + if let Some(model) = tcx.sess.code_model() { + use rustc_target::spec::CodeModel; + + context.add_command_line_option(match model { + CodeModel::Tiny => "-mcmodel=tiny", + CodeModel::Small => "-mcmodel=small", + CodeModel::Kernel => "-mcmodel=kernel", + CodeModel::Medium => "-mcmodel=medium", + CodeModel::Large => "-mcmodel=large", + }); + } + if tcx.sess.relocation_model() == rustc_target::spec::RelocModel::Static { - context.add_command_line_option("-mcmodel=kernel"); context.add_command_line_option("-fno-pie"); } diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 9282d8699eb..e6ae7cf174d 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -7,7 +7,10 @@ use gccjit::{ BinaryOp, Block, ComparisonOp, Context, Function, LValue, Location, RValue, ToRValue, Type, UnaryOp, }; -use rustc_apfloat::{ieee, Float, Round, Status}; +use rustc_abi as abi; +use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout, WrappingRange}; +use rustc_apfloat::{Float, Round, Status, ieee}; +use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::common::{ AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind, }; @@ -17,7 +20,6 @@ use rustc_codegen_ssa::traits::{ BackendTypes, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, LayoutTypeCodegenMethods, OverflowOp, StaticBuilderMethods, }; -use rustc_codegen_ssa::MemFlags; use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; @@ -25,13 +27,12 @@ use rustc_middle::ty::layout::{ FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, }; use rustc_middle::ty::{Instance, ParamEnv, Ty, TyCtxt}; -use rustc_span::def_id::DefId; use rustc_span::Span; +use rustc_span::def_id::DefId; use rustc_target::abi::call::FnAbi; -use rustc_target::abi::{self, Align, HasDataLayout, Size, TargetDataLayout, WrappingRange}; -use rustc_target::spec::{HasTargetSpec, HasWasmCAbiOpt, Target, WasmCAbi}; +use rustc_target::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, Target, WasmCAbi, X86Abi}; -use crate::common::{type_is_pointer, SignType, TypeReflection}; +use crate::common::{SignType, TypeReflection, type_is_pointer}; use crate::context::CodegenCx; use crate::intrinsic::llvm; use crate::type_of::LayoutGccExt; @@ -39,9 +40,6 @@ use crate::type_of::LayoutGccExt; // TODO(antoyo) type Funclet = (); -// TODO(antoyo): remove this variable. 
-static mut RETURN_VALUE_COUNT: usize = 0; - enum ExtremumOperation { Max, Min, @@ -50,13 +48,18 @@ enum ExtremumOperation { pub struct Builder<'a: 'gcc, 'gcc, 'tcx> { pub cx: &'a CodegenCx<'gcc, 'tcx>, pub block: Block<'gcc>, - stack_var_count: Cell<usize>, pub location: Option<Location<'gcc>>, + value_counter: Cell<u64>, } impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self { - Builder { cx, block, stack_var_count: Cell::new(0), location: None } + Builder { cx, block, location: None, value_counter: Cell::new(0) } + } + + fn next_value_counter(&self) -> u64 { + self.value_counter.set(self.value_counter.get() + 1); + self.value_counter.get() } fn atomic_extremum( @@ -138,7 +141,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { ) -> RValue<'gcc> { let size = get_maybe_pointer_size(src); let compare_exchange = - self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size)); + self.context.get_builtin_function(format!("__atomic_compare_exchange_{}", size)); let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc()); let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32); @@ -152,11 +155,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // NOTE: not sure why, but we have the wrong type here. let int_type = compare_exchange.get_param(2).to_rvalue().get_type(); let src = self.context.new_bitcast(self.location, src, int_type); - self.context.new_call( - self.location, - compare_exchange, - &[dst, expected, src, weak, order, failure_order], - ) + self.context.new_call(self.location, compare_exchange, &[ + dst, + expected, + src, + weak, + order, + failure_order, + ]) } pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) { @@ -267,10 +273,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { actual_val.dereference(self.location).to_rvalue() } } else { + // FIXME: this condition seems wrong: it will pass when both types are not + // a vector. assert!( (!expected_ty.is_vector() || actual_ty.is_vector()) && (expected_ty.is_vector() || !actual_ty.is_vector()), - "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", + "{:?} (is vector: {}) -> {:?} (is vector: {}), Function: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, @@ -280,6 +288,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { ); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. // TODO: remove bitcast now that vector types can be compared? + // ==> We use bitcast to avoid having to do many manual casts from e.g. __m256i to __v32qi (in + // the case of _mm256_aesenc_epi128). self.bitcast(actual_val, expected_ty) } } else { @@ -322,11 +332,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let void_type = self.context.new_type::<()>(); let current_func = self.block.get_function(); if return_type != void_type { - unsafe { RETURN_VALUE_COUNT += 1 }; let result = current_func.new_local( self.location, return_type, - &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }), + format!("returnValue{}", self.next_value_counter()), ); self.block.add_assignment( self.location, @@ -338,7 +347,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.block .add_eval(self.location, self.cx.context.new_call(self.location, func, &args)); // Return dummy value when not having return value. 
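// A self-contained sketch (ours, not from this diff) of the
// interior-mutability pattern that replaces the unsound `static mut`
// counter removed above: one `Cell<u64>` per builder, bumped through `&self`.
use std::cell::Cell;

struct LocalNameCounter {
    value_counter: Cell<u64>,
}

impl LocalNameCounter {
    // Mirrors `Builder::next_value_counter` from the hunk above: each call
    // returns a fresh id for naming locals like `returnValue{n}`, with no
    // `unsafe` and no process-global state.
    fn next_value_counter(&self) -> u64 {
        self.value_counter.set(self.value_counter.get() + 1);
        self.value_counter.get()
    }
}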
- self.context.new_rvalue_from_long(self.isize_type, 0) + self.context.new_rvalue_zero(self.isize_type) } } @@ -364,6 +373,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let args = { let function_address_names = self.function_address_names.borrow(); let original_function_name = function_address_names.get(&func_ptr); + func_ptr = llvm::adjust_function(self.context, &func_name, func_ptr, args); llvm::adjust_intrinsic_arguments( self, gcc_func, @@ -382,7 +392,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let current_func = self.block.get_function(); if return_type != void_type { - unsafe { RETURN_VALUE_COUNT += 1 }; let return_value = self.cx.context.new_call_through_ptr(self.location, func_ptr, &args); let return_value = llvm::adjust_intrinsic_return_value( self, @@ -395,7 +404,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let result = current_func.new_local( self.location, return_value.get_type(), - &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }), + format!("ptrReturnValue{}", self.next_value_counter()), ); self.block.add_assignment(self.location, result, return_value); result.to_rvalue() @@ -419,17 +428,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.cx.context.new_call_through_ptr(self.location, func_ptr, &args), ); // Return dummy value when not having return value. - let result = current_func.new_local( - self.location, - self.isize_type, - "dummyValueThatShouldNeverBeUsed", - ); - self.block.add_assignment( - self.location, - result, - self.context.new_rvalue_from_long(self.isize_type, 0), - ); - result.to_rvalue() + self.context.new_rvalue_zero(self.isize_type) } } @@ -444,11 +443,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let return_type = self.context.new_type::<bool>(); let current_func = self.block.get_function(); // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects. - unsafe { RETURN_VALUE_COUNT += 1 }; let result = current_func.new_local( self.location, return_type, - &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }), + format!("overflowReturnValue{}", self.next_value_counter()), ); self.block.add_assignment( self.location, @@ -500,6 +498,7 @@ impl<'a, 'gcc, 'tcx> Deref for Builder<'a, 'gcc, 'tcx> { impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> { type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value; + type Metadata = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Metadata; type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function; type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock; type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type; @@ -926,9 +925,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> { let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes()); // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial. 
- self.stack_var_count.set(self.stack_var_count.get() + 1); self.current_func() - .new_local(self.location, ty, &format!("stack_var_{}", self.stack_var_count.get())) + .new_local(self.location, ty, format!("stack_var_{}", self.next_value_counter())) .get_address(self.location) } @@ -951,11 +949,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { }; let ptr = self.context.new_cast(self.location, ptr, aligned_type.make_pointer()); let deref = ptr.dereference(self.location).to_rvalue(); - unsafe { RETURN_VALUE_COUNT += 1 }; let loaded_value = function.new_local( self.location, aligned_type, - &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }), + format!("loadedValue{}", self.next_value_counter()), ); block.add_assignment(self.location, loaded_value, deref); loaded_value.to_rvalue() @@ -976,7 +973,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // TODO(antoyo): use ty. // TODO(antoyo): handle alignment. let atomic_load = - self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes())); + self.context.get_builtin_function(format!("__atomic_load_{}", size.bytes())); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); let volatile_const_void_ptr_type = @@ -1002,12 +999,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { ) { let vr = scalar.valid_range(bx); match scalar.primitive() { - abi::Int(..) => { + abi::Primitive::Int(..) => { if !scalar.is_always_valid(bx) { bx.range_metadata(load, vr); } } - abi::Pointer(_) if vr.start < vr.end && !vr.contains(0) => { + abi::Primitive::Pointer(_) if vr.start < vr.end && !vr.contains(0) => { bx.nonnull_metadata(load); } _ => {} @@ -1019,11 +1016,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { OperandValue::Ref(place.val) } else if place.layout.is_gcc_immediate() { let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align); - if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr { scalar_load_metadata(self, load, scalar); } OperandValue::Immediate(self.to_immediate(load, place.layout)) - } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr { let b_offset = a.size(self).align_to(b.align(self).abi); let mut load = |i, scalar: &abi::Scalar, align| { @@ -1079,11 +1076,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); - let next = self.inbounds_gep( - self.backend_type(cg_elem.layout), - current.to_rvalue(), - &[self.const_usize(1)], - ); + let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[ + self.const_usize(1), + ]); self.llbb().add_assignment(self.location, current, next); self.br(header_bb); @@ -1134,7 +1129,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { ) { // TODO(antoyo): handle alignment. 
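// Sketch (ours) of the range test behind the `nonnull` branch in
// `scalar_load_metadata` above; this `WrappingRange` is a stand-in for
// `rustc_abi::WrappingRange`, with `contains` following its documented
// wrap-around semantics:
struct WrappingRange { start: u128, end: u128 }

impl WrappingRange {
    fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            // ordinary range [start, end]
            self.start <= v && v <= self.end
        } else {
            // wrapping range [start, MAX] plus [0, end]
            self.start <= v || v <= self.end
        }
    }
}

// A loaded pointer is marked non-null only when its valid range is
// non-wrapping and excludes zero, i.e. the `vr.start < vr.end &&
// !vr.contains(0)` condition in the hunk above.
fn wants_nonnull(vr: &WrappingRange) -> bool {
    vr.start < vr.end && !vr.contains(0)
}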
let atomic_store = - self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes())); + self.context.get_builtin_function(format!("__atomic_store_{}", size.bytes())); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); let volatile_const_void_ptr_type = self.context.new_type::<()>().make_volatile().make_pointer(); @@ -1730,16 +1725,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn fptosi_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { self.fptoint_sat(true, val, dest_ty) } - - fn instrprof_increment( - &mut self, - _fn_name: RValue<'gcc>, - _hash: RValue<'gcc>, - _num_counters: RValue<'gcc>, - _index: RValue<'gcc>, - ) { - unimplemented!(); - } } impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { @@ -2352,6 +2337,12 @@ impl<'tcx> HasWasmCAbiOpt for Builder<'_, '_, 'tcx> { } } +impl<'tcx> HasX86AbiOpt for Builder<'_, '_, 'tcx> { + fn x86_abi_opt(&self) -> X86Abi { + self.cx.x86_abi_opt() + } +} + pub trait ToGccComp { fn to_gcc_comparison(&self) -> ComparisonOp; } diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs index 9ad2e90122f..65972a03e83 100644 --- a/compiler/rustc_codegen_gcc/src/callee.rs +++ b/compiler/rustc_codegen_gcc/src/callee.rs @@ -98,8 +98,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) // whether we are sharing generics or not. The important thing here is // that the visibility we apply to the declaration is the same one that // has been applied to the definition (wherever that definition may be). - let is_generic = - instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some(); + let is_generic = instance.args.non_erasable_generics().next().is_some(); if is_generic { // This is a monomorphization. 
Its expected visibility depends diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 8e2220a2a9b..0d3e7083d56 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -1,11 +1,13 @@ use gccjit::{LValue, RValue, ToRValue, Type}; +use rustc_abi as abi; +use rustc_abi::HasDataLayout; +use rustc_abi::Primitive::Pointer; use rustc_codegen_ssa::traits::{ BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods, }; -use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; use rustc_middle::mir::Mutability; +use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; use rustc_middle::ty::layout::LayoutOf; -use rustc_target::abi::{self, HasDataLayout, Pointer}; use crate::consts::const_alloc_to_gcc; use crate::context::CodegenCx; @@ -80,22 +82,14 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.const_undef(typ) } - fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { - self.gcc_int(typ, int) - } - - fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> { - self.gcc_uint(typ, int) - } - - fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { - self.gcc_uint_big(typ, num) - } - fn const_bool(&self, val: bool) -> RValue<'gcc> { self.const_uint(self.type_i1(), val as u64) } + fn const_i8(&self, i: i8) -> RValue<'gcc> { + self.const_int(self.type_i8(), i as i64) + } + fn const_i16(&self, i: i16) -> RValue<'gcc> { self.const_int(self.type_i16(), i as i64) } @@ -104,8 +98,12 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.const_int(self.type_i32(), i as i64) } - fn const_i8(&self, i: i8) -> RValue<'gcc> { - self.const_int(self.type_i8(), i as i64) + fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { + self.gcc_int(typ, int) + } + + fn const_u8(&self, i: u8) -> RValue<'gcc> { + self.const_uint(self.type_u8(), i as u64) } fn const_u32(&self, i: u32) -> RValue<'gcc> { @@ -130,8 +128,12 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.const_uint(self.usize_type, i) } - fn const_u8(&self, i: u8) -> RValue<'gcc> { - self.const_uint(self.type_u8(), i as u64) + fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> { + self.gcc_uint(typ, int) + } + + fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { + self.gcc_uint_big(typ, num) } fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> { @@ -224,10 +226,10 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { value } GlobalAlloc::Function { instance, .. 
} => self.get_fn_addr(instance), - GlobalAlloc::VTable(ty, trait_ref) => { + GlobalAlloc::VTable(ty, dyn_ty) => { let alloc = self .tcx - .global_alloc(self.tcx.vtable_allocation((ty, trait_ref))) + .global_alloc(self.tcx.vtable_allocation((ty, dyn_ty.principal()))) .unwrap_memory(); let init = const_alloc_to_gcc(self, alloc); self.static_addr_of(init, alloc.inner().align, None) diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index 68b9df946d0..07c7a54de1c 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -7,8 +7,9 @@ use rustc_codegen_ssa::traits::{ use rustc_hir::def::DefKind; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::interpret::{ - self, read_target_uint, ConstAllocation, ErrorHandled, Scalar as InterpScalar, + self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint, }; +use rustc_middle::mir::mono::Linkage; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Instance}; use rustc_middle::{bug, span_bug}; @@ -145,7 +146,7 @@ impl<'gcc, 'tcx> StaticCodegenMethods for CodegenCx<'gcc, 'tcx> { // Wasm statics with custom link sections get special treatment as they // go into custom sections of the wasm executable. - if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if self.tcx.sess.target.is_like_wasm { if let Some(_section) = attrs.link_section { unimplemented!(); } @@ -258,7 +259,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { if !self.tcx.is_reachable_non_generic(def_id) { #[cfg(feature = "master")] - global.add_string_attribute(VarAttribute::Visibility(Visibility::Hidden)); + global.add_attribute(VarAttribute::Visibility(Visibility::Hidden)); } global @@ -386,6 +387,11 @@ fn check_and_apply_linkage<'gcc, 'tcx>( let global1 = cx.declare_global_with_linkage(sym, cx.type_i8(), base::global_linkage_to_gcc(linkage)); + if linkage == Linkage::ExternalWeak { + #[cfg(feature = "master")] + global1.add_attribute(VarAttribute::Weak); + } + // Declare an internal global `extern_with_linkage_foo` which // is initialized with the address of `foo`. 
If `foo` is // discarded during linking (for example, if `foo` has weak diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index 8e1a5b61285..707b35967a6 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -6,7 +6,7 @@ use gccjit::{ use rustc_codegen_ssa::base::wants_msvc_seh; use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, MiscCodegenMethods}; -use rustc_data_structures::base_n::{ToBaseN, ALPHANUMERIC_ONLY}; +use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_middle::mir::mono::CodegenUnit; use rustc_middle::span_bug; @@ -17,13 +17,16 @@ use rustc_middle::ty::layout::{ use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; use rustc_session::Session; use rustc_span::source_map::respan; -use rustc_span::{Span, DUMMY_SP}; +use rustc_span::{DUMMY_SP, Span}; use rustc_target::abi::{HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx}; -use rustc_target::spec::{HasTargetSpec, HasWasmCAbiOpt, Target, TlsModel, WasmCAbi}; +use rustc_target::spec::{ + HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, Target, TlsModel, WasmCAbi, X86Abi, +}; use crate::callee::get_fn; use crate::common::SignType; +#[cfg_attr(not(feature = "master"), allow(dead_code))] pub struct CodegenCx<'gcc, 'tcx> { pub codegen_unit: &'tcx CodegenUnit<'tcx>, pub context: &'gcc Context<'gcc>, @@ -226,48 +229,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { "__builtin_umul_overflow", "__builtin_usubll_overflow", "__builtin_usub_overflow", - "sqrtf", - "sqrt", "__builtin_powif", "__builtin_powi", - "sinf", - "sin", - "cosf", - "cos", - "powf", - "pow", - "expf", - "exp", - "exp2f", - "exp2", - "logf", - "log", - "log10f", - "log10", - "log2f", - "log2", - "fmaf", - "fma", "fabsf", "fabs", - "fminf", - "fmin", - "fmaxf", - "fmax", "copysignf", "copysign", - "floorf", - "floor", - "ceilf", - "ceil", - "truncf", - "trunc", - "rintf", - "rint", "nearbyintf", "nearbyint", - "roundf", - "round", ]; for builtin in builtins.iter() { @@ -414,6 +383,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> { type Value = RValue<'gcc>; + type Metadata = RValue<'gcc>; type Function = RValue<'gcc>; type BasicBlock = Block<'gcc>; @@ -570,6 +540,12 @@ impl<'gcc, 'tcx> HasWasmCAbiOpt for CodegenCx<'gcc, 'tcx> { } } +impl<'gcc, 'tcx> HasX86AbiOpt for CodegenCx<'gcc, 'tcx> { + fn x86_abi_opt(&self) -> X86Abi { + X86Abi { regparm: self.tcx.sess.opts.unstable_opts.regparm } + } +} + impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! 
{ diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs index 1d859656b3e..9d62ccc95d5 100644 --- a/compiler/rustc_codegen_gcc/src/debuginfo.rs +++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs @@ -10,8 +10,8 @@ use rustc_middle::mir::{self, Body, SourceScope}; use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty}; use rustc_session::config::DebugInfo; use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span, Symbol}; -use rustc_target::abi::call::FnAbi; use rustc_target::abi::Size; +use rustc_target::abi::call::FnAbi; use crate::builder::Builder; use crate::context::CodegenCx; @@ -55,7 +55,7 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { } /// Generate the `debug_context` in an MIR Body. -/// # Souce of Origin +/// # Source of Origin /// Copied from `create_scope_map.rs` of rustc_codegen_llvm fn compute_mir_scopes<'gcc, 'tcx>( cx: &CodegenCx<'gcc, 'tcx>, @@ -90,7 +90,7 @@ fn compute_mir_scopes<'gcc, 'tcx>( /// Update the `debug_context`, adding new scope to it, /// if it's not added as is denoted in `instantiated`. /// -/// # Souce of Origin +/// # Source of Origin /// Copied from `create_scope_map.rs` of rustc_codegen_llvm /// FIXME(tempdragon/?): Add Scope Support Here. fn make_mir_scope<'gcc, 'tcx>( diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs index 46818045f0b..442488b7fd6 100644 --- a/compiler/rustc_codegen_gcc/src/declare.rs +++ b/compiler/rustc_codegen_gcc/src/declare.rs @@ -168,7 +168,15 @@ fn declare_raw_fn<'gcc>( variadic: bool, ) -> Function<'gcc> { if name.starts_with("llvm.") { - let intrinsic = llvm::intrinsic(name, cx); + let intrinsic = match name { + "llvm.fma.f16" => { + // fma is not a target builtin, but a normal builtin, so we handle it differently + // here. 
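// (That is, GCC exposes `fma` through its generic builtins table, so the
// code fetches it with `get_builtin_function("fma")` instead of routing the
// `llvm.fma.f16` name through the target-intrinsic lookup in `llvm::intrinsic`.)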
+ cx.context.get_builtin_function("fma") + } + _ => llvm::intrinsic(name, cx), + }; + cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic); return intrinsic; } diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs index 6bada3d334c..7a586b5b04c 100644 --- a/compiler/rustc_codegen_gcc/src/errors.rs +++ b/compiler/rustc_codegen_gcc/src/errors.rs @@ -1,9 +1,6 @@ -use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level}; use rustc_macros::{Diagnostic, Subdiagnostic}; use rustc_span::Span; -use crate::fluent_generated as fluent; - #[derive(Diagnostic)] #[diag(codegen_gcc_unknown_ctarget_feature_prefix)] #[note] @@ -20,6 +17,19 @@ pub(crate) struct UnknownCTargetFeature<'a> { pub rust_feature: PossibleFeature<'a>, } +#[derive(Diagnostic)] +#[diag(codegen_gcc_unstable_ctarget_feature)] +#[note] +pub(crate) struct UnstableCTargetFeature<'a> { + pub feature: &'a str, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_forbidden_ctarget_feature)] +pub(crate) struct ForbiddenCTargetFeature<'a> { + pub feature: &'a str, +} + #[derive(Subdiagnostic)] pub(crate) enum PossibleFeature<'a> { #[help(codegen_gcc_possible_feature)] @@ -46,15 +56,6 @@ pub(crate) struct InvalidMinimumAlignment { } #[derive(Diagnostic)] -#[diag(codegen_gcc_tied_target_features)] -#[help] -pub(crate) struct TiedTargetFeatures { - #[primary_span] - pub span: Span, - pub features: String, -} - -#[derive(Diagnostic)] #[diag(codegen_gcc_copy_bitcode)] pub(crate) struct CopyBitcode { pub err: std::io::Error, @@ -78,27 +79,3 @@ pub(crate) struct LtoDylib; pub(crate) struct LtoBitcodeFromRlib { pub gcc_err: String, } - -pub(crate) struct TargetFeatureDisableOrEnable<'a> { - pub features: &'a [&'a str], - pub span: Option<Span>, - pub missing_features: Option<MissingFeatures>, -} - -#[derive(Subdiagnostic)] -#[help(codegen_gcc_missing_features)] -pub(crate) struct MissingFeatures; - -impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> { - fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> { - let mut diag = Diag::new(dcx, level, fluent::codegen_gcc_target_feature_disable_or_enable); - if let Some(span) = self.span { - diag.span(span); - }; - if let Some(missing_features) = self.missing_features { - diag.subdiagnostic(missing_features); - } - diag.arg("features", self.features.join(", ")); - diag - } -} diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs index 5308ccdb614..65279c9495a 100644 --- a/compiler/rustc_codegen_gcc/src/gcc_util.rs +++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs @@ -1,14 +1,16 @@ #[cfg(feature = "master")] use gccjit::Context; +use rustc_codegen_ssa::codegen_attrs::check_tied_features; +use rustc_codegen_ssa::errors::TargetFeatureDisableOrEnable; use rustc_data_structures::fx::FxHashMap; use rustc_middle::bug; use rustc_session::Session; -use rustc_target::target_features::RUSTC_SPECIFIC_FEATURES; -use smallvec::{smallvec, SmallVec}; +use rustc_target::target_features::{RUSTC_SPECIFIC_FEATURES, Stability}; +use smallvec::{SmallVec, smallvec}; use crate::errors::{ - PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature, - UnknownCTargetFeaturePrefix, + ForbiddenCTargetFeature, PossibleFeature, UnknownCTargetFeature, UnknownCTargetFeaturePrefix, + UnstableCTargetFeature, }; /// The list of GCC features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`, @@ -44,7 +46,7 @@ pub(crate) fn global_gcc_features(sess: &Session, 
diagnostics: bool) -> Vec<Stri ); // -Ctarget-features - let supported_features = sess.target.supported_target_features(); + let known_features = sess.target.rust_target_features(); let mut featsmap = FxHashMap::default(); let feats = sess .opts @@ -63,37 +65,49 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri } }; + // Get the backend feature name, if any. + // This excludes rustc-specific features, that do not get passed down to GCC. let feature = backend_feature_name(s)?; // Warn against use of GCC specific feature names on the CLI. - if diagnostics && !supported_features.iter().any(|&(v, _, _)| v == feature) { - let rust_feature = supported_features.iter().find_map(|&(rust_feature, _, _)| { - let gcc_features = to_gcc_features(sess, rust_feature); - if gcc_features.contains(&feature) && !gcc_features.contains(&rust_feature) { - Some(rust_feature) - } else { - None + if diagnostics { + let feature_state = known_features.iter().find(|&&(v, _, _)| v == feature); + match feature_state { + None => { + let rust_feature = + known_features.iter().find_map(|&(rust_feature, _, _)| { + let gcc_features = to_gcc_features(sess, rust_feature); + if gcc_features.contains(&feature) + && !gcc_features.contains(&rust_feature) + { + Some(rust_feature) + } else { + None + } + }); + let unknown_feature = if let Some(rust_feature) = rust_feature { + UnknownCTargetFeature { + feature, + rust_feature: PossibleFeature::Some { rust_feature }, + } + } else { + UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None } + }; + sess.dcx().emit_warn(unknown_feature); } - }); - let unknown_feature = if let Some(rust_feature) = rust_feature { - UnknownCTargetFeature { - feature, - rust_feature: PossibleFeature::Some { rust_feature }, + Some((_, Stability::Stable, _)) => {} + Some((_, Stability::Unstable(_), _)) => { + // An unstable feature. Warn about using it. + sess.dcx().emit_warn(UnstableCTargetFeature { feature }); } - } else { - UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None } - }; - sess.dcx().emit_warn(unknown_feature); - } + Some((_, Stability::Forbidden { .. }, _)) => { + sess.dcx().emit_err(ForbiddenCTargetFeature { feature }); + } + } - if diagnostics { // FIXME(nagisa): figure out how to not allocate a full hashset here. featsmap.insert(feature, enable_disable == '+'); } - // rustc-specific features do not get passed down to GCC… - if RUSTC_SPECIFIC_FEATURES.contains(&feature) { - return None; - } // ... otherwise though we run through `to_gcc_features` when // passing requests down to GCC. This means that all in-language // features also work on the command line instead of having two @@ -185,23 +199,6 @@ pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> } } -// Given a map from target_features to whether they are enabled or disabled, -// ensure only valid combinations are allowed. 
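// Self-contained sketch (ours) of the -Ctarget-feature policy the new
// `match feature_state` above implements; the local enum stands in for
// `rustc_target::target_features::Stability`:
enum FeatureStability { Stable, Unstable, Forbidden }

fn ctarget_feature_policy(state: Option<FeatureStability>) -> &'static str {
    match state {
        // unknown name: warn, suggesting the Rust feature name when a GCC
        // alias maps back to one
        None => "warn: unknown feature",
        Some(FeatureStability::Stable) => "accept silently",
        Some(FeatureStability::Unstable) => "warn: unstable feature",
        Some(FeatureStability::Forbidden) => "error: forbidden feature",
    }
}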
-pub fn check_tied_features( - sess: &Session, - features: &FxHashMap<&str, bool>, -) -> Option<&'static [&'static str]> { - for tied in sess.target.tied_target_features() { - // Tied features must be set to the same value, or not set at all - let mut tied_iter = tied.iter(); - let enabled = features.get(tied_iter.next().unwrap()); - if tied_iter.any(|feature| enabled != features.get(feature)) { - return Some(tied); - } - } - None -} - fn arch_to_gcc(name: &str) -> &str { match name { "M68020" => "68020", diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs index 29f4db6738b..5ca440f4c9b 100644 --- a/compiler/rustc_codegen_gcc/src/int.rs +++ b/compiler/rustc_codegen_gcc/src/int.rs @@ -6,8 +6,8 @@ use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, T use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, BuilderMethods, OverflowOp}; use rustc_middle::ty::{ParamEnv, Ty}; -use rustc_target::abi::call::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode}; use rustc_target::abi::Endian; +use rustc_target::abi::call::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode}; use rustc_target::spec; use crate::builder::{Builder, ToGccComp}; @@ -395,11 +395,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. }); - let return_type = self.context.new_struct_type( - self.location, - "result_overflow", - &[result_field, overflow_field], - ); + let return_type = self + .context + .new_struct_type(self.location, "result_overflow", &[result_field, overflow_field]); let result = if indirect { let return_value = self.current_func().new_local(self.location, return_type.as_type(), "return_value"); @@ -416,11 +414,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { ); self.llbb().add_eval( self.location, - self.context.new_call( - self.location, - func, - &[return_value.get_address(self.location), lhs, rhs], - ), + self.context.new_call(self.location, func, &[ + return_value.get_address(self.location), + lhs, + rhs, + ]), ); return_value.to_rvalue() } else { @@ -735,7 +733,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // TODO(antoyo): check if it's faster to use string literals and a // match instead of format!. - let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width)); + let bswap = self.cx.context.get_builtin_function(format!("__builtin_bswap{}", width)); // FIXME(antoyo): this cast should not be necessary. Remove // when having proper sized integer types. 
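// Illustrative only: the byte-swap builtin above is selected by integer
// bit-width, e.g. `__builtin_bswap16/32/64` (the helper name is ours):
fn bswap_builtin_name(width: u64) -> String {
    format!("__builtin_bswap{}", width)
}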
let param_type = bswap.get_param(0).to_rvalue().get_type(); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs index f7500933789..b8d1cde1d5d 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs @@ -31,8 +31,11 @@ match name { "llvm.AMDGPU.trig.preop.v2f64" => "__builtin_amdgpu_trig_preop", "llvm.AMDGPU.trig.preop.v4f32" => "__builtin_amdgpu_trig_preop", // aarch64 + "llvm.aarch64.chkfeat" => "__builtin_arm_chkfeat", "llvm.aarch64.dmb" => "__builtin_arm_dmb", "llvm.aarch64.dsb" => "__builtin_arm_dsb", + "llvm.aarch64.gcspopm" => "__builtin_arm_gcspopm", + "llvm.aarch64.gcsss" => "__builtin_arm_gcsss", "llvm.aarch64.isb" => "__builtin_arm_isb", "llvm.aarch64.prefetch" => "__builtin_arm_prefetch", "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8", @@ -80,7 +83,6 @@ match name { "llvm.amdgcn.dot4.f32.fp8.fp8" => "__builtin_amdgcn_dot4_f32_fp8_fp8", "llvm.amdgcn.ds.add.gs.reg.rtn" => "__builtin_amdgcn_ds_add_gs_reg_rtn", "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute", - "llvm.amdgcn.ds.fadd.v2bf16" => "__builtin_amdgcn_ds_atomic_fadd_v2bf16", "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier", "llvm.amdgcn.ds.gws.init" => "__builtin_amdgcn_ds_gws_init", "llvm.amdgcn.ds.gws.sema.br" => "__builtin_amdgcn_ds_gws_sema_br", @@ -96,6 +98,7 @@ match name { "llvm.amdgcn.fdot2.f16.f16" => "__builtin_amdgcn_fdot2_f16_f16", "llvm.amdgcn.fdot2.f32.bf16" => "__builtin_amdgcn_fdot2_f32_bf16", "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy", + "llvm.amdgcn.global.load.lds" => "__builtin_amdgcn_global_load_lds", "llvm.amdgcn.groupstaticsize" => "__builtin_amdgcn_groupstaticsize", "llvm.amdgcn.iglp.opt" => "__builtin_amdgcn_iglp_opt", "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr", @@ -154,16 +157,11 @@ match name { "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8", "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8", "llvm.amdgcn.perm" => "__builtin_amdgcn_perm", - "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16", "llvm.amdgcn.permlane16.var" => "__builtin_amdgcn_permlane16_var", - "llvm.amdgcn.permlane64" => "__builtin_amdgcn_permlane64", - "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16", "llvm.amdgcn.permlanex16.var" => "__builtin_amdgcn_permlanex16_var", "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8", "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr", "llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy", - "llvm.amdgcn.readfirstlane" => "__builtin_amdgcn_readfirstlane", - "llvm.amdgcn.readlane" => "__builtin_amdgcn_readlane", "llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy", "llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier", "llvm.amdgcn.s.barrier.init" => "__builtin_amdgcn_s_barrier_init", @@ -192,6 +190,8 @@ match name { "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg", "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep", "llvm.amdgcn.s.sleep.var" => "__builtin_amdgcn_s_sleep_var", + "llvm.amdgcn.s.ttracedata" => "__builtin_amdgcn_s_ttracedata", + "llvm.amdgcn.s.ttracedata.imm" => "__builtin_amdgcn_s_ttracedata_imm", "llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready", "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt", "llvm.amdgcn.s.wakeup.barrier" => "__builtin_amdgcn_s_wakeup_barrier", @@ -227,7 +227,6 @@ match name { "llvm.amdgcn.workgroup.id.x" => 
"__builtin_amdgcn_workgroup_id_x", "llvm.amdgcn.workgroup.id.y" => "__builtin_amdgcn_workgroup_id_y", "llvm.amdgcn.workgroup.id.z" => "__builtin_amdgcn_workgroup_id_z", - "llvm.amdgcn.writelane" => "__builtin_amdgcn_writelane", // arm "llvm.arm.cdp" => "__builtin_arm_cdp", "llvm.arm.cdp2" => "__builtin_arm_cdp2", @@ -4536,10 +4535,18 @@ match name { "llvm.nvvm.div.rz.d" => "__nvvm_div_rz_d", "llvm.nvvm.div.rz.f" => "__nvvm_div_rz_f", "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f", + "llvm.nvvm.e4m3x2.to.f16x2.rn" => "__nvvm_e4m3x2_to_f16x2_rn", + "llvm.nvvm.e4m3x2.to.f16x2.rn.relu" => "__nvvm_e4m3x2_to_f16x2_rn_relu", + "llvm.nvvm.e5m2x2.to.f16x2.rn" => "__nvvm_e5m2x2_to_f16x2_rn", + "llvm.nvvm.e5m2x2.to.f16x2.rn.relu" => "__nvvm_e5m2x2_to_f16x2_rn_relu", "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d", "llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f", "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f", "llvm.nvvm.exit" => "__nvvm_exit", + "llvm.nvvm.f16x2.to.e4m3x2.rn" => "__nvvm_f16x2_to_e4m3x2_rn", + "llvm.nvvm.f16x2.to.e4m3x2.rn.relu" => "__nvvm_f16x2_to_e4m3x2_rn_relu", + "llvm.nvvm.f16x2.to.e5m2x2.rn" => "__nvvm_f16x2_to_e5m2x2_rn", + "llvm.nvvm.f16x2.to.e5m2x2.rn.relu" => "__nvvm_f16x2_to_e5m2x2_rn_relu", "llvm.nvvm.f2bf16.rn" => "__nvvm_f2bf16_rn", "llvm.nvvm.f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu", "llvm.nvvm.f2bf16.rz" => "__nvvm_f2bf16_rz", @@ -4582,6 +4589,10 @@ match name { "llvm.nvvm.fabs.d" => "__nvvm_fabs_d", "llvm.nvvm.fabs.f" => "__nvvm_fabs_f", "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f", + "llvm.nvvm.ff.to.e4m3x2.rn" => "__nvvm_ff_to_e4m3x2_rn", + "llvm.nvvm.ff.to.e4m3x2.rn.relu" => "__nvvm_ff_to_e4m3x2_rn_relu", + "llvm.nvvm.ff.to.e5m2x2.rn" => "__nvvm_ff_to_e5m2x2_rn", + "llvm.nvvm.ff.to.e5m2x2.rn.relu" => "__nvvm_ff_to_e5m2x2_rn_relu", "llvm.nvvm.ff2bf16x2.rn" => "__nvvm_ff2bf16x2_rn", "llvm.nvvm.ff2bf16x2.rn.relu" => "__nvvm_ff2bf16x2_rn_relu", "llvm.nvvm.ff2bf16x2.rz" => "__nvvm_ff2bf16x2_rz", @@ -4866,6 +4877,7 @@ match name { "llvm.nvvm.round.ftz.f" => "__nvvm_round_ftz_f", "llvm.nvvm.rsqrt.approx.d" => "__nvvm_rsqrt_approx_d", "llvm.nvvm.rsqrt.approx.f" => "__nvvm_rsqrt_approx_f", + "llvm.nvvm.rsqrt.approx.ftz.d" => "__nvvm_rsqrt_approx_ftz_d", "llvm.nvvm.rsqrt.approx.ftz.f" => "__nvvm_rsqrt_approx_ftz_f", "llvm.nvvm.sad.i" => "__nvvm_sad_i", "llvm.nvvm.sad.ll" => "__nvvm_sad_ll", @@ -5164,6 +5176,8 @@ match name { // ppc "llvm.ppc.addex" => "__builtin_ppc_addex", "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd", + "llvm.ppc.addg6s" => "__builtin_addg6s", + "llvm.ppc.addg6sd" => "__builtin_ppc_addg6s", "llvm.ppc.altivec.crypto.vcipher" => "__builtin_altivec_crypto_vcipher", "llvm.ppc.altivec.crypto.vcipherlast" => "__builtin_altivec_crypto_vcipherlast", "llvm.ppc.altivec.crypto.vncipher" => "__builtin_altivec_crypto_vncipher", @@ -5461,6 +5475,10 @@ match name { "llvm.ppc.bcdsub" => "__builtin_ppc_bcdsub", "llvm.ppc.bcdsub.p" => "__builtin_ppc_bcdsub_p", "llvm.ppc.bpermd" => "__builtin_bpermd", + "llvm.ppc.cbcdtd" => "__builtin_cbcdtd", + "llvm.ppc.cbcdtdd" => "__builtin_ppc_cbcdtd", + "llvm.ppc.cdtbcd" => "__builtin_cdtbcd", + "llvm.ppc.cdtbcdd" => "__builtin_ppc_cdtbcd", "llvm.ppc.cfuged" => "__builtin_cfuged", "llvm.ppc.cmpeqb" => "__builtin_ppc_cmpeqb", "llvm.ppc.cmprb" => "__builtin_ppc_cmprb", @@ -5627,7 +5645,6 @@ match name { "llvm.ppc.qpx.qvstfs" => "__builtin_qpx_qvstfs", "llvm.ppc.qpx.qvstfsa" => "__builtin_qpx_qvstfsa", "llvm.ppc.readflm" => "__builtin_readflm", - "llvm.ppc.rldimi" => "__builtin_ppc_rldimi", 
"llvm.ppc.rlwimi" => "__builtin_ppc_rlwimi", "llvm.ppc.rlwnm" => "__builtin_ppc_rlwnm", "llvm.ppc.scalar.extract.expq" => "__builtin_vsx_scalar_extract_expq", @@ -7210,29 +7227,6 @@ match name { "llvm.ve.vl.xorm.MMM" => "__builtin_ve_vl_xorm_MMM", "llvm.ve.vl.xorm.mmm" => "__builtin_ve_vl_xorm_mmm", // x86 - "llvm.x86.3dnow.pavgusb" => "__builtin_ia32_pavgusb", - "llvm.x86.3dnow.pf2id" => "__builtin_ia32_pf2id", - "llvm.x86.3dnow.pfacc" => "__builtin_ia32_pfacc", - "llvm.x86.3dnow.pfadd" => "__builtin_ia32_pfadd", - "llvm.x86.3dnow.pfcmpeq" => "__builtin_ia32_pfcmpeq", - "llvm.x86.3dnow.pfcmpge" => "__builtin_ia32_pfcmpge", - "llvm.x86.3dnow.pfcmpgt" => "__builtin_ia32_pfcmpgt", - "llvm.x86.3dnow.pfmax" => "__builtin_ia32_pfmax", - "llvm.x86.3dnow.pfmin" => "__builtin_ia32_pfmin", - "llvm.x86.3dnow.pfmul" => "__builtin_ia32_pfmul", - "llvm.x86.3dnow.pfrcp" => "__builtin_ia32_pfrcp", - "llvm.x86.3dnow.pfrcpit1" => "__builtin_ia32_pfrcpit1", - "llvm.x86.3dnow.pfrcpit2" => "__builtin_ia32_pfrcpit2", - "llvm.x86.3dnow.pfrsqit1" => "__builtin_ia32_pfrsqit1", - "llvm.x86.3dnow.pfrsqrt" => "__builtin_ia32_pfrsqrt", - "llvm.x86.3dnow.pfsub" => "__builtin_ia32_pfsub", - "llvm.x86.3dnow.pfsubr" => "__builtin_ia32_pfsubr", - "llvm.x86.3dnow.pi2fd" => "__builtin_ia32_pi2fd", - "llvm.x86.3dnow.pmulhrw" => "__builtin_ia32_pmulhrw", - "llvm.x86.3dnowa.pf2iw" => "__builtin_ia32_pf2iw", - "llvm.x86.3dnowa.pfnacc" => "__builtin_ia32_pfnacc", - "llvm.x86.3dnowa.pfpnacc" => "__builtin_ia32_pfpnacc", - "llvm.x86.3dnowa.pi2fw" => "__builtin_ia32_pi2fw", "llvm.x86.aadd32" => "__builtin_ia32_aadd32", "llvm.x86.aadd64" => "__builtin_ia32_aadd64", "llvm.x86.aand32" => "__builtin_ia32_aand32", @@ -7334,6 +7328,207 @@ match name { "llvm.x86.avx.vtestz.ps.256" => "__builtin_ia32_vtestzps256", "llvm.x86.avx.vzeroall" => "__builtin_ia32_vzeroall", "llvm.x86.avx.vzeroupper" => "__builtin_ia32_vzeroupper", + "llvm.x86.avx10.mask.vcvt2ps2phx.128" => "__builtin_ia32_vcvt2ps2phx128_mask", + "llvm.x86.avx10.mask.vcvt2ps2phx.256" => "__builtin_ia32_vcvt2ps2phx256_mask", + "llvm.x86.avx10.mask.vcvt2ps2phx.512" => "__builtin_ia32_vcvt2ps2phx512_mask", + "llvm.x86.avx10.mask.vcvtbiasph2bf8128" => "__builtin_ia32_vcvtbiasph2bf8_128_mask", + "llvm.x86.avx10.mask.vcvtbiasph2bf8256" => "__builtin_ia32_vcvtbiasph2bf8_256_mask", + "llvm.x86.avx10.mask.vcvtbiasph2bf8512" => "__builtin_ia32_vcvtbiasph2bf8_512_mask", + "llvm.x86.avx10.mask.vcvtbiasph2bf8s128" => "__builtin_ia32_vcvtbiasph2bf8s_128_mask", + "llvm.x86.avx10.mask.vcvtbiasph2bf8s256" => "__builtin_ia32_vcvtbiasph2bf8s_256_mask", + "llvm.x86.avx10.mask.vcvtbiasph2bf8s512" => "__builtin_ia32_vcvtbiasph2bf8s_512_mask", + "llvm.x86.avx10.mask.vcvtbiasph2hf8128" => "__builtin_ia32_vcvtbiasph2hf8_128_mask", + "llvm.x86.avx10.mask.vcvtbiasph2hf8256" => "__builtin_ia32_vcvtbiasph2hf8_256_mask", + "llvm.x86.avx10.mask.vcvtbiasph2hf8512" => "__builtin_ia32_vcvtbiasph2hf8_512_mask", + "llvm.x86.avx10.mask.vcvtbiasph2hf8s128" => "__builtin_ia32_vcvtbiasph2hf8s_128_mask", + "llvm.x86.avx10.mask.vcvtbiasph2hf8s256" => "__builtin_ia32_vcvtbiasph2hf8s_256_mask", + "llvm.x86.avx10.mask.vcvtbiasph2hf8s512" => "__builtin_ia32_vcvtbiasph2hf8s_512_mask", + "llvm.x86.avx10.mask.vcvthf82ph128" => "__builtin_ia32_vcvthf8_2ph128_mask", + "llvm.x86.avx10.mask.vcvthf82ph256" => "__builtin_ia32_vcvthf8_2ph256_mask", + "llvm.x86.avx10.mask.vcvthf82ph512" => "__builtin_ia32_vcvthf8_2ph512_mask", + "llvm.x86.avx10.mask.vcvtneph2bf8128" => "__builtin_ia32_vcvtneph2bf8_128_mask", + 
"llvm.x86.avx10.mask.vcvtneph2bf8256" => "__builtin_ia32_vcvtneph2bf8_256_mask", + "llvm.x86.avx10.mask.vcvtneph2bf8512" => "__builtin_ia32_vcvtneph2bf8_512_mask", + "llvm.x86.avx10.mask.vcvtneph2bf8s128" => "__builtin_ia32_vcvtneph2bf8s_128_mask", + "llvm.x86.avx10.mask.vcvtneph2bf8s256" => "__builtin_ia32_vcvtneph2bf8s_256_mask", + "llvm.x86.avx10.mask.vcvtneph2bf8s512" => "__builtin_ia32_vcvtneph2bf8s_512_mask", + "llvm.x86.avx10.mask.vcvtneph2hf8128" => "__builtin_ia32_vcvtneph2hf8_128_mask", + "llvm.x86.avx10.mask.vcvtneph2hf8256" => "__builtin_ia32_vcvtneph2hf8_256_mask", + "llvm.x86.avx10.mask.vcvtneph2hf8512" => "__builtin_ia32_vcvtneph2hf8_512_mask", + "llvm.x86.avx10.mask.vcvtneph2hf8s128" => "__builtin_ia32_vcvtneph2hf8s_128_mask", + "llvm.x86.avx10.mask.vcvtneph2hf8s256" => "__builtin_ia32_vcvtneph2hf8s_256_mask", + "llvm.x86.avx10.mask.vcvtneph2hf8s512" => "__builtin_ia32_vcvtneph2hf8s_512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2dq256" => "__builtin_ia32_vcvtpd2dq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2ph256" => "__builtin_ia32_vcvtpd2ph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2ps256" => "__builtin_ia32_vcvtpd2ps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2qq256" => "__builtin_ia32_vcvtpd2qq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2udq256" => "__builtin_ia32_vcvtpd2udq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2uqq256" => "__builtin_ia32_vcvtpd2uqq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2dq256" => "__builtin_ia32_vcvtph2dq256_round_mask", + "llvm.x86.avx10.mask.vcvtph2ibs128" => "__builtin_ia32_vcvtph2ibs128_mask", + "llvm.x86.avx10.mask.vcvtph2ibs256" => "__builtin_ia32_vcvtph2ibs256_mask", + "llvm.x86.avx10.mask.vcvtph2ibs512" => "__builtin_ia32_vcvtph2ibs512_mask", + "llvm.x86.avx10.mask.vcvtph2iubs128" => "__builtin_ia32_vcvtph2iubs128_mask", + "llvm.x86.avx10.mask.vcvtph2iubs256" => "__builtin_ia32_vcvtph2iubs256_mask", + "llvm.x86.avx10.mask.vcvtph2iubs512" => "__builtin_ia32_vcvtph2iubs512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2pd256" => "__builtin_ia32_vcvtph2pd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2psx256" => "__builtin_ia32_vcvtph2psx256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2qq256" => "__builtin_ia32_vcvtph2qq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2udq256" => "__builtin_ia32_vcvtph2udq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2uqq256" => "__builtin_ia32_vcvtph2uqq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2uw256" => "__builtin_ia32_vcvtph2uw256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2w256" => "__builtin_ia32_vcvtph2w256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2dq256" => "__builtin_ia32_vcvtps2dq256_round_mask", + "llvm.x86.avx10.mask.vcvtps2ibs128" => "__builtin_ia32_vcvtps2ibs128_mask", + "llvm.x86.avx10.mask.vcvtps2ibs256" => "__builtin_ia32_vcvtps2ibs256_mask", + "llvm.x86.avx10.mask.vcvtps2ibs512" => "__builtin_ia32_vcvtps2ibs512_mask", + "llvm.x86.avx10.mask.vcvtps2iubs128" => "__builtin_ia32_vcvtps2iubs128_mask", + "llvm.x86.avx10.mask.vcvtps2iubs256" => "__builtin_ia32_vcvtps2iubs256_mask", + "llvm.x86.avx10.mask.vcvtps2iubs512" => "__builtin_ia32_vcvtps2iubs512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2pd256" => 
"__builtin_ia32_vcvtps2pd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2ph256" => "__builtin_ia32_vcvtps2ph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2phx256" => "__builtin_ia32_vcvtps2phx256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2qq256" => "__builtin_ia32_vcvtps2qq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2udq256" => "__builtin_ia32_vcvtps2udq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2uqq256" => "__builtin_ia32_vcvtps2uqq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2dq256" => "__builtin_ia32_vcvttpd2dq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2qq256" => "__builtin_ia32_vcvttpd2qq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2udq256" => "__builtin_ia32_vcvttpd2udq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2uqq256" => "__builtin_ia32_vcvttpd2uqq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2dq256" => "__builtin_ia32_vcvttph2dq256_round_mask", + "llvm.x86.avx10.mask.vcvttph2ibs128" => "__builtin_ia32_vcvttph2ibs128_mask", + "llvm.x86.avx10.mask.vcvttph2ibs256" => "__builtin_ia32_vcvttph2ibs256_mask", + "llvm.x86.avx10.mask.vcvttph2ibs512" => "__builtin_ia32_vcvttph2ibs512_mask", + "llvm.x86.avx10.mask.vcvttph2iubs128" => "__builtin_ia32_vcvttph2iubs128_mask", + "llvm.x86.avx10.mask.vcvttph2iubs256" => "__builtin_ia32_vcvttph2iubs256_mask", + "llvm.x86.avx10.mask.vcvttph2iubs512" => "__builtin_ia32_vcvttph2iubs512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2qq256" => "__builtin_ia32_vcvttph2qq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2udq256" => "__builtin_ia32_vcvttph2udq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2uqq256" => "__builtin_ia32_vcvttph2uqq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2uw256" => "__builtin_ia32_vcvttph2uw256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2w256" => "__builtin_ia32_vcvttph2w256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2dq256" => "__builtin_ia32_vcvttps2dq256_round_mask", + "llvm.x86.avx10.mask.vcvttps2ibs128" => "__builtin_ia32_vcvttps2ibs128_mask", + "llvm.x86.avx10.mask.vcvttps2ibs256" => "__builtin_ia32_vcvttps2ibs256_mask", + "llvm.x86.avx10.mask.vcvttps2ibs512" => "__builtin_ia32_vcvttps2ibs512_mask", + "llvm.x86.avx10.mask.vcvttps2iubs128" => "__builtin_ia32_vcvttps2iubs128_mask", + "llvm.x86.avx10.mask.vcvttps2iubs256" => "__builtin_ia32_vcvttps2iubs256_mask", + "llvm.x86.avx10.mask.vcvttps2iubs512" => "__builtin_ia32_vcvttps2iubs512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2qq256" => "__builtin_ia32_vcvttps2qq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2udq256" => "__builtin_ia32_vcvttps2udq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2uqq256" => "__builtin_ia32_vcvttps2uqq256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfcmaddcph256" => "__builtin_ia32_vfcmaddcph256_round_mask3", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfcmulcph256" => "__builtin_ia32_vfcmulcph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfixupimmpd256" => "__builtin_ia32_vfixupimmpd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfixupimmps256" => "__builtin_ia32_vfixupimmps256_round_mask", + // [INVALID 
CONVERSION]: "llvm.x86.avx10.mask.vfmaddcph256" => "__builtin_ia32_vfmaddcph256_round_mask3", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfmulcph256" => "__builtin_ia32_vfmulcph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetexppd256" => "__builtin_ia32_vgetexppd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetexpph256" => "__builtin_ia32_vgetexpph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetexpps256" => "__builtin_ia32_vgetexpps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetmantpd256" => "__builtin_ia32_vgetmantpd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetmantph256" => "__builtin_ia32_vgetmantph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetmantps256" => "__builtin_ia32_vgetmantps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxpd.round" => "__builtin_ia32_vminmaxpd512_round_mask", + "llvm.x86.avx10.mask.vminmaxpd128" => "__builtin_ia32_vminmaxpd128_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxpd256.round" => "__builtin_ia32_vminmaxpd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxph.round" => "__builtin_ia32_vminmaxph512_round_mask", + "llvm.x86.avx10.mask.vminmaxph128" => "__builtin_ia32_vminmaxph128_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxph256.round" => "__builtin_ia32_vminmaxph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxps.round" => "__builtin_ia32_vminmaxps512_round_mask", + "llvm.x86.avx10.mask.vminmaxps128" => "__builtin_ia32_vminmaxps128_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxps256.round" => "__builtin_ia32_vminmaxps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxsd.round" => "__builtin_ia32_vminmaxsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxsh.round" => "__builtin_ia32_vminmaxsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxss.round" => "__builtin_ia32_vminmaxss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrangepd256" => "__builtin_ia32_vrangepd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrangeps256" => "__builtin_ia32_vrangeps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vreducepd256" => "__builtin_ia32_vreducepd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vreduceph256" => "__builtin_ia32_vreduceph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vreduceps256" => "__builtin_ia32_vreduceps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrndscalepd256" => "__builtin_ia32_vrndscalepd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrndscaleph256" => "__builtin_ia32_vrndscaleph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrndscaleps256" => "__builtin_ia32_vrndscaleps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vscalefpd256" => "__builtin_ia32_vscalefpd256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vscalefph256" => "__builtin_ia32_vscalefph256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vscalefps256" => "__builtin_ia32_vscalefps256_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfcmaddcph256" => "__builtin_ia32_vfcmaddcph256_round_maskz", + // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfixupimmpd256" => "__builtin_ia32_vfixupimmpd256_round_maskz", + // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfixupimmps256" => 
"__builtin_ia32_vfixupimmps256_round_maskz", + // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfmaddcph256" => "__builtin_ia32_vfmaddcph256_round_maskz", + "llvm.x86.avx10.vaddpd256" => "__builtin_ia32_vaddpd256_round", + "llvm.x86.avx10.vaddph256" => "__builtin_ia32_vaddph256_round", + "llvm.x86.avx10.vaddps256" => "__builtin_ia32_vaddps256_round", + "llvm.x86.avx10.vcvtne2ph2bf8128" => "__builtin_ia32_vcvtne2ph2bf8_128", + "llvm.x86.avx10.vcvtne2ph2bf8256" => "__builtin_ia32_vcvtne2ph2bf8_256", + "llvm.x86.avx10.vcvtne2ph2bf8512" => "__builtin_ia32_vcvtne2ph2bf8_512", + "llvm.x86.avx10.vcvtne2ph2bf8s128" => "__builtin_ia32_vcvtne2ph2bf8s_128", + "llvm.x86.avx10.vcvtne2ph2bf8s256" => "__builtin_ia32_vcvtne2ph2bf8s_256", + "llvm.x86.avx10.vcvtne2ph2bf8s512" => "__builtin_ia32_vcvtne2ph2bf8s_512", + "llvm.x86.avx10.vcvtne2ph2hf8128" => "__builtin_ia32_vcvtne2ph2hf8_128", + "llvm.x86.avx10.vcvtne2ph2hf8256" => "__builtin_ia32_vcvtne2ph2hf8_256", + "llvm.x86.avx10.vcvtne2ph2hf8512" => "__builtin_ia32_vcvtne2ph2hf8_512", + "llvm.x86.avx10.vcvtne2ph2hf8s128" => "__builtin_ia32_vcvtne2ph2hf8s_128", + "llvm.x86.avx10.vcvtne2ph2hf8s256" => "__builtin_ia32_vcvtne2ph2hf8s_256", + "llvm.x86.avx10.vcvtne2ph2hf8s512" => "__builtin_ia32_vcvtne2ph2hf8s_512", + "llvm.x86.avx10.vcvtnebf162ibs128" => "__builtin_ia32_vcvtnebf162ibs128", + "llvm.x86.avx10.vcvtnebf162ibs256" => "__builtin_ia32_vcvtnebf162ibs256", + "llvm.x86.avx10.vcvtnebf162ibs512" => "__builtin_ia32_vcvtnebf162ibs512", + "llvm.x86.avx10.vcvtnebf162iubs128" => "__builtin_ia32_vcvtnebf162iubs128", + "llvm.x86.avx10.vcvtnebf162iubs256" => "__builtin_ia32_vcvtnebf162iubs256", + "llvm.x86.avx10.vcvtnebf162iubs512" => "__builtin_ia32_vcvtnebf162iubs512", + "llvm.x86.avx10.vcvttnebf162ibs128" => "__builtin_ia32_vcvttnebf162ibs128", + "llvm.x86.avx10.vcvttnebf162ibs256" => "__builtin_ia32_vcvttnebf162ibs256", + "llvm.x86.avx10.vcvttnebf162ibs512" => "__builtin_ia32_vcvttnebf162ibs512", + "llvm.x86.avx10.vcvttnebf162iubs128" => "__builtin_ia32_vcvttnebf162iubs128", + "llvm.x86.avx10.vcvttnebf162iubs256" => "__builtin_ia32_vcvttnebf162iubs256", + "llvm.x86.avx10.vcvttnebf162iubs512" => "__builtin_ia32_vcvttnebf162iubs512", + "llvm.x86.avx10.vdivpd256" => "__builtin_ia32_vdivpd256_round", + "llvm.x86.avx10.vdivph256" => "__builtin_ia32_vdivph256_round", + "llvm.x86.avx10.vdivps256" => "__builtin_ia32_vdivps256_round", + "llvm.x86.avx10.vdpphps.128" => "__builtin_ia32_vdpphps128", + "llvm.x86.avx10.vdpphps.256" => "__builtin_ia32_vdpphps256", + "llvm.x86.avx10.vdpphps.512" => "__builtin_ia32_vdpphps512", + "llvm.x86.avx10.vfmaddsubpd256" => "__builtin_ia32_vfmaddsubpd256_round", + "llvm.x86.avx10.vfmaddsubph256" => "__builtin_ia32_vfmaddsubph256_round", + "llvm.x86.avx10.vfmaddsubps256" => "__builtin_ia32_vfmaddsubps256_round", + "llvm.x86.avx10.vmaxpd256" => "__builtin_ia32_vmaxpd256_round", + "llvm.x86.avx10.vmaxph256" => "__builtin_ia32_vmaxph256_round", + "llvm.x86.avx10.vmaxps256" => "__builtin_ia32_vmaxps256_round", + "llvm.x86.avx10.vminmaxnepbf16128" => "__builtin_ia32_vminmaxnepbf16128", + "llvm.x86.avx10.vminmaxnepbf16256" => "__builtin_ia32_vminmaxnepbf16256", + "llvm.x86.avx10.vminmaxnepbf16512" => "__builtin_ia32_vminmaxnepbf16512", + "llvm.x86.avx10.vminmaxpd128" => "__builtin_ia32_vminmaxpd128", + "llvm.x86.avx10.vminmaxpd256" => "__builtin_ia32_vminmaxpd256", + "llvm.x86.avx10.vminmaxph128" => "__builtin_ia32_vminmaxph128", + "llvm.x86.avx10.vminmaxph256" => "__builtin_ia32_vminmaxph256", + "llvm.x86.avx10.vminmaxps128" => 
"__builtin_ia32_vminmaxps128", + "llvm.x86.avx10.vminmaxps256" => "__builtin_ia32_vminmaxps256", + "llvm.x86.avx10.vminpd256" => "__builtin_ia32_vminpd256_round", + "llvm.x86.avx10.vminph256" => "__builtin_ia32_vminph256_round", + "llvm.x86.avx10.vminps256" => "__builtin_ia32_vminps256_round", + "llvm.x86.avx10.vmpsadbw.512" => "__builtin_ia32_mpsadbw512", + "llvm.x86.avx10.vmulpd256" => "__builtin_ia32_vmulpd256_round", + "llvm.x86.avx10.vmulph256" => "__builtin_ia32_vmulph256_round", + "llvm.x86.avx10.vmulps256" => "__builtin_ia32_vmulps256_round", + "llvm.x86.avx10.vpdpbssd.512" => "__builtin_ia32_vpdpbssd512", + "llvm.x86.avx10.vpdpbssds.512" => "__builtin_ia32_vpdpbssds512", + "llvm.x86.avx10.vpdpbsud.512" => "__builtin_ia32_vpdpbsud512", + "llvm.x86.avx10.vpdpbsuds.512" => "__builtin_ia32_vpdpbsuds512", + "llvm.x86.avx10.vpdpbuud.512" => "__builtin_ia32_vpdpbuud512", + "llvm.x86.avx10.vpdpbuuds.512" => "__builtin_ia32_vpdpbuuds512", + "llvm.x86.avx10.vpdpwsud.512" => "__builtin_ia32_vpdpwsud512", + "llvm.x86.avx10.vpdpwsuds.512" => "__builtin_ia32_vpdpwsuds512", + "llvm.x86.avx10.vpdpwusd.512" => "__builtin_ia32_vpdpwusd512", + "llvm.x86.avx10.vpdpwusds.512" => "__builtin_ia32_vpdpwusds512", + "llvm.x86.avx10.vpdpwuud.512" => "__builtin_ia32_vpdpwuud512", + "llvm.x86.avx10.vpdpwuuds.512" => "__builtin_ia32_vpdpwuuds512", + "llvm.x86.avx10.vsqrtpd256" => "__builtin_ia32_vsqrtpd256_round", + "llvm.x86.avx10.vsqrtph256" => "__builtin_ia32_vsqrtph256_round", + "llvm.x86.avx10.vsqrtps256" => "__builtin_ia32_vsqrtps256_round", + "llvm.x86.avx10.vsubpd256" => "__builtin_ia32_vsubpd256_round", + "llvm.x86.avx10.vsubph256" => "__builtin_ia32_vsubph256_round", + "llvm.x86.avx10.vsubps256" => "__builtin_ia32_vsubps256_round", "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gatherd_d", "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gatherd_d256", "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gatherd_pd", @@ -8738,10 +8933,10 @@ match name { "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask", "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask", "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask", - // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", - // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask", - // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", - // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask", + "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask", + // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", + "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask", + // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd", "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless", "llvm.x86.avx512.rsqrt14.pd.128" => "__builtin_ia32_rsqrt14pd128_mask", @@ -8754,10 +8949,10 @@ match name { "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask", "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask", "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask", - // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", - // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask", - // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", - // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => 
"__builtin_ia32_rsqrt28ss_mask", + "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask", + // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", + "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask", + // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df", "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si", "llvm.x86.avx512.scatter.dpq.512" => "__builtin_ia32_scattersiv8di", @@ -9082,75 +9277,6 @@ match name { "llvm.x86.lwpval64" => "__builtin_ia32_lwpval64", "llvm.x86.mmx.emms" => "__builtin_ia32_emms", "llvm.x86.mmx.femms" => "__builtin_ia32_femms", - "llvm.x86.mmx.maskmovq" => "__builtin_ia32_maskmovq", - "llvm.x86.mmx.movnt.dq" => "__builtin_ia32_movntq", - "llvm.x86.mmx.packssdw" => "__builtin_ia32_packssdw", - "llvm.x86.mmx.packsswb" => "__builtin_ia32_packsswb", - "llvm.x86.mmx.packuswb" => "__builtin_ia32_packuswb", - "llvm.x86.mmx.padd.b" => "__builtin_ia32_paddb", - "llvm.x86.mmx.padd.d" => "__builtin_ia32_paddd", - "llvm.x86.mmx.padd.q" => "__builtin_ia32_paddq", - "llvm.x86.mmx.padd.w" => "__builtin_ia32_paddw", - "llvm.x86.mmx.padds.b" => "__builtin_ia32_paddsb", - "llvm.x86.mmx.padds.w" => "__builtin_ia32_paddsw", - "llvm.x86.mmx.paddus.b" => "__builtin_ia32_paddusb", - "llvm.x86.mmx.paddus.w" => "__builtin_ia32_paddusw", - "llvm.x86.mmx.palignr.b" => "__builtin_ia32_palignr", - "llvm.x86.mmx.pand" => "__builtin_ia32_pand", - "llvm.x86.mmx.pandn" => "__builtin_ia32_pandn", - "llvm.x86.mmx.pavg.b" => "__builtin_ia32_pavgb", - "llvm.x86.mmx.pavg.w" => "__builtin_ia32_pavgw", - "llvm.x86.mmx.pcmpeq.b" => "__builtin_ia32_pcmpeqb", - "llvm.x86.mmx.pcmpeq.d" => "__builtin_ia32_pcmpeqd", - "llvm.x86.mmx.pcmpeq.w" => "__builtin_ia32_pcmpeqw", - "llvm.x86.mmx.pcmpgt.b" => "__builtin_ia32_pcmpgtb", - "llvm.x86.mmx.pcmpgt.d" => "__builtin_ia32_pcmpgtd", - "llvm.x86.mmx.pcmpgt.w" => "__builtin_ia32_pcmpgtw", - "llvm.x86.mmx.pextr.w" => "__builtin_ia32_vec_ext_v4hi", - "llvm.x86.mmx.pinsr.w" => "__builtin_ia32_vec_set_v4hi", - "llvm.x86.mmx.pmadd.wd" => "__builtin_ia32_pmaddwd", - "llvm.x86.mmx.pmaxs.w" => "__builtin_ia32_pmaxsw", - "llvm.x86.mmx.pmaxu.b" => "__builtin_ia32_pmaxub", - "llvm.x86.mmx.pmins.w" => "__builtin_ia32_pminsw", - "llvm.x86.mmx.pminu.b" => "__builtin_ia32_pminub", - "llvm.x86.mmx.pmovmskb" => "__builtin_ia32_pmovmskb", - "llvm.x86.mmx.pmulh.w" => "__builtin_ia32_pmulhw", - "llvm.x86.mmx.pmulhu.w" => "__builtin_ia32_pmulhuw", - "llvm.x86.mmx.pmull.w" => "__builtin_ia32_pmullw", - "llvm.x86.mmx.pmulu.dq" => "__builtin_ia32_pmuludq", - "llvm.x86.mmx.por" => "__builtin_ia32_por", - "llvm.x86.mmx.psad.bw" => "__builtin_ia32_psadbw", - "llvm.x86.mmx.psll.d" => "__builtin_ia32_pslld", - "llvm.x86.mmx.psll.q" => "__builtin_ia32_psllq", - "llvm.x86.mmx.psll.w" => "__builtin_ia32_psllw", - "llvm.x86.mmx.pslli.d" => "__builtin_ia32_pslldi", - "llvm.x86.mmx.pslli.q" => "__builtin_ia32_psllqi", - "llvm.x86.mmx.pslli.w" => "__builtin_ia32_psllwi", - "llvm.x86.mmx.psra.d" => "__builtin_ia32_psrad", - "llvm.x86.mmx.psra.w" => "__builtin_ia32_psraw", - "llvm.x86.mmx.psrai.d" => "__builtin_ia32_psradi", - "llvm.x86.mmx.psrai.w" => "__builtin_ia32_psrawi", - "llvm.x86.mmx.psrl.d" => "__builtin_ia32_psrld", - "llvm.x86.mmx.psrl.q" => "__builtin_ia32_psrlq", - "llvm.x86.mmx.psrl.w" => "__builtin_ia32_psrlw", - "llvm.x86.mmx.psrli.d" => "__builtin_ia32_psrldi", - "llvm.x86.mmx.psrli.q" => "__builtin_ia32_psrlqi", - 
"llvm.x86.mmx.psrli.w" => "__builtin_ia32_psrlwi", - "llvm.x86.mmx.psub.b" => "__builtin_ia32_psubb", - "llvm.x86.mmx.psub.d" => "__builtin_ia32_psubd", - "llvm.x86.mmx.psub.q" => "__builtin_ia32_psubq", - "llvm.x86.mmx.psub.w" => "__builtin_ia32_psubw", - "llvm.x86.mmx.psubs.b" => "__builtin_ia32_psubsb", - "llvm.x86.mmx.psubs.w" => "__builtin_ia32_psubsw", - "llvm.x86.mmx.psubus.b" => "__builtin_ia32_psubusb", - "llvm.x86.mmx.psubus.w" => "__builtin_ia32_psubusw", - "llvm.x86.mmx.punpckhbw" => "__builtin_ia32_punpckhbw", - "llvm.x86.mmx.punpckhdq" => "__builtin_ia32_punpckhdq", - "llvm.x86.mmx.punpckhwd" => "__builtin_ia32_punpckhwd", - "llvm.x86.mmx.punpcklbw" => "__builtin_ia32_punpcklbw", - "llvm.x86.mmx.punpckldq" => "__builtin_ia32_punpckldq", - "llvm.x86.mmx.punpcklwd" => "__builtin_ia32_punpcklwd", - "llvm.x86.mmx.pxor" => "__builtin_ia32_pxor", "llvm.x86.monitorx" => "__builtin_ia32_monitorx", "llvm.x86.movdir64b" => "__builtin_ia32_movdir64b", "llvm.x86.mwaitx" => "__builtin_ia32_mwaitx", @@ -9193,16 +9319,10 @@ match name { "llvm.x86.sse.comile.ss" => "__builtin_ia32_comile", "llvm.x86.sse.comilt.ss" => "__builtin_ia32_comilt", "llvm.x86.sse.comineq.ss" => "__builtin_ia32_comineq", - "llvm.x86.sse.cvtpd2pi" => "__builtin_ia32_cvtpd2pi", - "llvm.x86.sse.cvtpi2pd" => "__builtin_ia32_cvtpi2pd", - "llvm.x86.sse.cvtpi2ps" => "__builtin_ia32_cvtpi2ps", - "llvm.x86.sse.cvtps2pi" => "__builtin_ia32_cvtps2pi", "llvm.x86.sse.cvtsi2ss" => "__builtin_ia32_cvtsi2ss", "llvm.x86.sse.cvtsi642ss" => "__builtin_ia32_cvtsi642ss", "llvm.x86.sse.cvtss2si" => "__builtin_ia32_cvtss2si", "llvm.x86.sse.cvtss2si64" => "__builtin_ia32_cvtss2si64", - "llvm.x86.sse.cvttpd2pi" => "__builtin_ia32_cvttpd2pi", - "llvm.x86.sse.cvttps2pi" => "__builtin_ia32_cvttps2pi", "llvm.x86.sse.cvttss2si" => "__builtin_ia32_cvttss2si", "llvm.x86.sse.cvttss2si64" => "__builtin_ia32_cvttss2si64", "llvm.x86.sse.div.ss" => "__builtin_ia32_divss", @@ -9212,7 +9332,6 @@ match name { "llvm.x86.sse.min.ss" => "__builtin_ia32_minss", "llvm.x86.sse.movmsk.ps" => "__builtin_ia32_movmskps", "llvm.x86.sse.mul.ss" => "__builtin_ia32_mulss", - "llvm.x86.sse.pshuf.w" => "__builtin_ia32_pshufw", "llvm.x86.sse.rcp.ps" => "__builtin_ia32_rcpps", "llvm.x86.sse.rcp.ss" => "__builtin_ia32_rcpss", "llvm.x86.sse.rsqrt.ps" => "__builtin_ia32_rsqrtps", @@ -9398,35 +9517,20 @@ match name { "llvm.x86.sse4a.insertqi" => "__builtin_ia32_insertqi", "llvm.x86.sse4a.movnt.sd" => "__builtin_ia32_movntsd", "llvm.x86.sse4a.movnt.ss" => "__builtin_ia32_movntss", - "llvm.x86.ssse3.pabs.b" => "__builtin_ia32_pabsb", "llvm.x86.ssse3.pabs.b.128" => "__builtin_ia32_pabsb128", - "llvm.x86.ssse3.pabs.d" => "__builtin_ia32_pabsd", "llvm.x86.ssse3.pabs.d.128" => "__builtin_ia32_pabsd128", - "llvm.x86.ssse3.pabs.w" => "__builtin_ia32_pabsw", "llvm.x86.ssse3.pabs.w.128" => "__builtin_ia32_pabsw128", - "llvm.x86.ssse3.phadd.d" => "__builtin_ia32_phaddd", "llvm.x86.ssse3.phadd.d.128" => "__builtin_ia32_phaddd128", - "llvm.x86.ssse3.phadd.sw" => "__builtin_ia32_phaddsw", "llvm.x86.ssse3.phadd.sw.128" => "__builtin_ia32_phaddsw128", - "llvm.x86.ssse3.phadd.w" => "__builtin_ia32_phaddw", "llvm.x86.ssse3.phadd.w.128" => "__builtin_ia32_phaddw128", - "llvm.x86.ssse3.phsub.d" => "__builtin_ia32_phsubd", "llvm.x86.ssse3.phsub.d.128" => "__builtin_ia32_phsubd128", - "llvm.x86.ssse3.phsub.sw" => "__builtin_ia32_phsubsw", "llvm.x86.ssse3.phsub.sw.128" => "__builtin_ia32_phsubsw128", - "llvm.x86.ssse3.phsub.w" => "__builtin_ia32_phsubw", "llvm.x86.ssse3.phsub.w.128" => 
"__builtin_ia32_phsubw128", - "llvm.x86.ssse3.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw", "llvm.x86.ssse3.pmadd.ub.sw.128" => "__builtin_ia32_pmaddubsw128", - "llvm.x86.ssse3.pmul.hr.sw" => "__builtin_ia32_pmulhrsw", "llvm.x86.ssse3.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128", - "llvm.x86.ssse3.pshuf.b" => "__builtin_ia32_pshufb", "llvm.x86.ssse3.pshuf.b.128" => "__builtin_ia32_pshufb128", - "llvm.x86.ssse3.psign.b" => "__builtin_ia32_psignb", "llvm.x86.ssse3.psign.b.128" => "__builtin_ia32_psignb128", - "llvm.x86.ssse3.psign.d" => "__builtin_ia32_psignd", "llvm.x86.ssse3.psign.d.128" => "__builtin_ia32_psignd128", - "llvm.x86.ssse3.psign.w" => "__builtin_ia32_psignw", "llvm.x86.ssse3.psign.w.128" => "__builtin_ia32_psignw128", "llvm.x86.sttilecfg" => "__builtin_ia32_tile_storeconfig", "llvm.x86.stui" => "__builtin_ia32_stui", diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs index 554e57250e6..0a448ded6b1 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs @@ -1,11 +1,43 @@ use std::borrow::Cow; -use gccjit::{Function, FunctionPtrType, RValue, ToRValue, UnaryOp}; +use gccjit::{CType, Context, Function, FunctionPtrType, RValue, ToRValue, UnaryOp}; use rustc_codegen_ssa::traits::BuilderMethods; use crate::builder::Builder; use crate::context::CodegenCx; +#[cfg_attr(not(feature = "master"), allow(unused_variables))] +pub fn adjust_function<'gcc>( + context: &'gcc Context<'gcc>, + func_name: &str, + func_ptr: RValue<'gcc>, + args: &[RValue<'gcc>], +) -> RValue<'gcc> { + // FIXME: we should not need this hack: this is required because both _mm_fcmadd_sch + // and _mm_mask3_fcmadd_round_sch calls llvm.x86.avx512fp16.mask.vfcmadd.csh and we + // seem to need to map this one LLVM intrinsic to 2 different GCC builtins. + #[cfg(feature = "master")] + match func_name { + "__builtin_ia32_vfcmaddcsh_mask3_round" => { + if format!("{:?}", args[3]).ends_with("255") { + return context + .get_target_builtin_function("__builtin_ia32_vfcmaddcsh_mask_round") + .get_address(None); + } + } + "__builtin_ia32_vfmaddcsh_mask3_round" => { + if format!("{:?}", args[3]).ends_with("255") { + return context + .get_target_builtin_function("__builtin_ia32_vfmaddcsh_mask_round") + .get_address(None); + } + } + _ => (), + } + + func_ptr +} + pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, @@ -13,6 +45,11 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( func_name: &str, original_function_name: Option<&String>, ) -> Cow<'b, [RValue<'gcc>]> { + // TODO: this might not be a good way to workaround the missing tile builtins. + if func_name == "__builtin_trap" { + return vec![].into(); + } + // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing // arguments here. 
if gcc_func.get_param_count() != args.len() { @@ -147,7 +184,11 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( | "__builtin_ia32_psrav16hi_mask" | "__builtin_ia32_psrav8hi_mask" | "__builtin_ia32_permvarhi256_mask" - | "__builtin_ia32_permvarhi128_mask" => { + | "__builtin_ia32_permvarhi128_mask" + | "__builtin_ia32_maxph128_mask" + | "__builtin_ia32_maxph256_mask" + | "__builtin_ia32_minph128_mask" + | "__builtin_ia32_minph256_mask" => { let mut new_args = args.to_vec(); let arg3_type = gcc_func.get_param_type(2); let vector_type = arg3_type.dyncast_vector().expect("vector type"); @@ -182,7 +223,19 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( | "__builtin_ia32_vplzcntd_128_mask" | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" - | "__builtin_ia32_vplzcntq_128_mask" => { + | "__builtin_ia32_vplzcntq_128_mask" + | "__builtin_ia32_cvtqq2pd128_mask" + | "__builtin_ia32_cvtqq2pd256_mask" + | "__builtin_ia32_cvtqq2ps256_mask" + | "__builtin_ia32_cvtuqq2pd128_mask" + | "__builtin_ia32_cvtuqq2pd256_mask" + | "__builtin_ia32_cvtuqq2ps256_mask" + | "__builtin_ia32_vcvtw2ph128_mask" + | "__builtin_ia32_vcvtw2ph256_mask" + | "__builtin_ia32_vcvtuw2ph128_mask" + | "__builtin_ia32_vcvtuw2ph256_mask" + | "__builtin_ia32_vcvtdq2ph256_mask" + | "__builtin_ia32_vcvtudq2ph256_mask" => { let mut new_args = args.to_vec(); // Remove last arg as it doesn't seem to be used in GCC and is always false. new_args.pop(); @@ -281,7 +334,11 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( new_args.push(last_arg); args = new_args.into(); } - "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { + "__builtin_ia32_vfmaddsubps512_mask" + | "__builtin_ia32_vfmaddsubpd512_mask" + | "__builtin_ia32_cmpsh_mask_round" + | "__builtin_ia32_vfmaddph512_mask" + | "__builtin_ia32_vfmaddsubph512_mask" => { let mut new_args = args.to_vec(); let last_arg = new_args.pop().expect("last arg"); let arg4_type = gcc_func.get_param_type(3); @@ -304,9 +361,8 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( | "__builtin_ia32_vpermi2varpd128_mask" | "__builtin_ia32_vpmadd52huq512_mask" | "__builtin_ia32_vpmadd52luq512_mask" - | "__builtin_ia32_vpmadd52huq256_mask" - | "__builtin_ia32_vpmadd52luq256_mask" - | "__builtin_ia32_vpmadd52huq128_mask" => { + | "__builtin_ia32_vfmaddsubph128_mask" + | "__builtin_ia32_vfmaddsubph256_mask" => { let mut new_args = args.to_vec(); let arg4_type = gcc_func.get_param_type(3); let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); @@ -355,7 +411,14 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); args = vec![new_args[1], new_args[0], new_args[2], minus_one].into(); } - "__builtin_ia32_xrstor" | "__builtin_ia32_xsavec" => { + "__builtin_ia32_xrstor" + | "__builtin_ia32_xrstor64" + | "__builtin_ia32_xsavec" + | "__builtin_ia32_xsavec64" + | "__builtin_ia32_xsave" + | "__builtin_ia32_xsave64" + | "__builtin_ia32_xsaveopt" + | "__builtin_ia32_xsaveopt64" => { let new_args = args.to_vec(); let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32); let arg2 = new_args[1] << thirty_two | new_args[2]; @@ -378,11 +441,76 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( ); args = vec![arg.get_address(None)].into(); } + "__builtin_ia32_cvtqq2pd512_mask" + | "__builtin_ia32_cvtqq2ps512_mask" + | "__builtin_ia32_cvtuqq2pd512_mask" + | "__builtin_ia32_cvtuqq2ps512_mask" + | "__builtin_ia32_sqrtph512_mask_round" + 
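Of the argument rewrites above, the `xsave`/`xrstor` family does real arithmetic: LLVM passes the XSAVE state-component mask as two 32-bit halves, while the GCC builtins take a single 64-bit mask, hence the `new_args[1] << thirty_two | new_args[2]` recombination. A standalone sketch of the same computation:

```rust
// The LLVM intrinsic supplies (ptr, hi, lo); the GCC builtin wants
// (ptr, mask64). Recombine the halves exactly as the patch does with
// `new_args[1] << thirty_two | new_args[2]`.
fn combine_xsave_mask(hi: u32, lo: u32) -> u64 {
    ((hi as u64) << 32) | lo as u64
}

fn main() {
    assert_eq!(combine_xsave_mask(0x0000_0001, 0x0000_0007), 0x0000_0001_0000_0007);
}
```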
| "__builtin_ia32_vcvtw2ph512_mask_round" + | "__builtin_ia32_vcvtuw2ph512_mask_round" + | "__builtin_ia32_vcvtdq2ph512_mask_round" + | "__builtin_ia32_vcvtudq2ph512_mask_round" + | "__builtin_ia32_vcvtqq2ph512_mask_round" + | "__builtin_ia32_vcvtuqq2ph512_mask_round" => { + let mut old_args = args.to_vec(); + let mut new_args = vec![]; + new_args.push(old_args.swap_remove(0)); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = + builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + new_args.push(old_args.swap_remove(0)); + args = new_args.into(); + } + "__builtin_ia32_addph512_mask_round" + | "__builtin_ia32_subph512_mask_round" + | "__builtin_ia32_mulph512_mask_round" + | "__builtin_ia32_divph512_mask_round" + | "__builtin_ia32_maxph512_mask_round" + | "__builtin_ia32_minph512_mask_round" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + + let arg3_type = gcc_func.get_param_type(2); + let vector_type = arg3_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = + builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]); + new_args.push(first_arg); + + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + } + // NOTE: the LLVM intrinsics receive 3 floats, but the GCC builtin requires 3 vectors. + "__builtin_ia32_vfmaddsh3_mask" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let arg4_type = gcc_func.get_param_type(3); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 8]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 8]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 8]); + let arg4 = builder.context.new_rvalue_from_int(arg4_type, -1); + args = vec![a, b, c, arg4, new_args[3]].into(); + } _ => (), } } else { match func_name { - "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + "__builtin_ia32_rndscaless_mask_round" + | "__builtin_ia32_rndscalesd_mask_round" + | "__builtin_ia32_reducesh_mask_round" => { let new_args = args.to_vec(); let arg3_type = gcc_func.get_param_type(2); let arg3 = builder.context.new_cast(None, new_args[4], arg3_type); @@ -390,7 +518,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type); args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into(); } - // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + // NOTE: the LLVM intrinsics receive 3 floats, but the GCC builtin requires 3 vectors. // FIXME: the intrinsics like _mm_mask_fmadd_sd should probably directly call the GCC // intrinsic to avoid this. 
"__builtin_ia32_vfmaddss3_round" => { @@ -473,6 +601,52 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( let new_args = args.to_vec(); args = vec![new_args[1], new_args[0], new_args[2]].into(); } + "__builtin_ia32_rangesd128_mask_round" + | "__builtin_ia32_rangess128_mask_round" + | "__builtin_ia32_reducesd_mask_round" + | "__builtin_ia32_reducess_mask_round" => { + let new_args = args.to_vec(); + args = vec![ + new_args[0], + new_args[1], + new_args[4], + new_args[2], + new_args[3], + new_args[5], + ] + .into(); + } + "__builtin_ia32_rndscalesh_mask_round" => { + let new_args = args.to_vec(); + args = vec![ + new_args[0], + new_args[1], + new_args[4], + new_args[2], + new_args[3], + new_args[5], + ] + .into(); + } + "fma" => { + let mut new_args = args.to_vec(); + new_args[0] = builder.context.new_cast(None, new_args[0], builder.double_type); + new_args[1] = builder.context.new_cast(None, new_args[1], builder.double_type); + new_args[2] = builder.context.new_cast(None, new_args[2], builder.double_type); + args = new_args.into(); + } + "__builtin_ia32_sqrtsh_mask_round" + | "__builtin_ia32_vcvtss2sh_mask_round" + | "__builtin_ia32_vcvtsd2sh_mask_round" + | "__builtin_ia32_vcvtsh2ss_mask_round" + | "__builtin_ia32_vcvtsh2sd_mask_round" + | "__builtin_ia32_rcpsh_mask" + | "__builtin_ia32_rsqrtsh_mask" => { + // The first two arguments are inverted, so swap them. + let mut new_args = args.to_vec(); + new_args.swap(0, 1); + args = new_args.into(); + } _ => (), } } @@ -489,7 +663,9 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>( orig_args: &[RValue<'gcc>], ) -> RValue<'gcc> { match func_name { - "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => { + "__builtin_ia32_vfmaddss3_round" + | "__builtin_ia32_vfmaddsd3_round" + | "__builtin_ia32_vfmaddsh3_mask" => { #[cfg(feature = "master")] { let zero = builder.context.new_rvalue_zero(builder.int_type); @@ -511,12 +687,11 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>( let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult"); let struct_type = builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]); - return_value = builder.context.new_struct_constructor( - None, - struct_type.as_type(), - None, - &[return_value, last_arg.dereference(None).to_rvalue()], - ); + return_value = + builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[ + return_value, + last_arg.dereference(None).to_rvalue(), + ]); } } "__builtin_ia32_stmxcsr" => { @@ -541,12 +716,15 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>( let field2 = builder.context.new_field(None, return_value.get_type(), "success"); let struct_type = builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]); - return_value = builder.context.new_struct_constructor( - None, - struct_type.as_type(), - None, - &[random_number, success_variable.to_rvalue()], - ); + return_value = + builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[ + random_number, + success_variable.to_rvalue(), + ]); + } + "fma" => { + let f16_type = builder.context.new_c_type(CType::Float16); + return_value = builder.context.new_cast(None, return_value, f16_type); } _ => (), } @@ -781,7 +959,9 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx512.mask.cmp.b.256" => "__builtin_ia32_cmpb256_mask", "llvm.x86.avx512.mask.cmp.b.128" => "__builtin_ia32_cmpb128_mask", "llvm.x86.xrstor" => "__builtin_ia32_xrstor", + "llvm.x86.xrstor64" => 
"__builtin_ia32_xrstor64", "llvm.x86.xsavec" => "__builtin_ia32_xsavec", + "llvm.x86.xsavec64" => "__builtin_ia32_xsavec64", "llvm.x86.addcarry.32" => "__builtin_ia32_addcarryx_u32", "llvm.x86.subborrow.32" => "__builtin_ia32_sbb_u32", "llvm.x86.avx512.mask.compress.store.w.512" => "__builtin_ia32_compressstoreuhi512_mask", @@ -970,9 +1150,9 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask", "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask", "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask", - "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask", - "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask", - "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask", + "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256", + "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256", + "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128", "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd_v16si", "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd_v8si", "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd_v4si", @@ -985,6 +1165,180 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds_v16si", "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds_v8si", "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds_v4si", + "llvm.x86.xsave" => "__builtin_ia32_xsave", + "llvm.x86.xsave64" => "__builtin_ia32_xsave64", + "llvm.x86.xsaveopt" => "__builtin_ia32_xsaveopt", + "llvm.x86.xsaveopt64" => "__builtin_ia32_xsaveopt64", + "llvm.x86.avx512.mask.loadu.w.512" => "__builtin_ia32_loaddquhi512_mask", + "llvm.x86.avx512.mask.loadu.b.512" => "__builtin_ia32_loaddquqi512_mask", + "llvm.x86.avx512.mask.loadu.w.256" => "__builtin_ia32_loaddquhi256_mask", + "llvm.x86.avx512.mask.loadu.b.256" => "__builtin_ia32_loaddquqi256_mask", + "llvm.x86.avx512.mask.loadu.w.128" => "__builtin_ia32_loaddquhi128_mask", + "llvm.x86.avx512.mask.loadu.b.128" => "__builtin_ia32_loaddquqi128_mask", + "llvm.x86.avx512.mask.storeu.w.512" => "__builtin_ia32_storedquhi512_mask", + "llvm.x86.avx512.mask.storeu.b.512" => "__builtin_ia32_storedquqi512_mask", + "llvm.x86.avx512.mask.storeu.w.256" => "__builtin_ia32_storedquhi256_mask", + "llvm.x86.avx512.mask.storeu.b.256" => "__builtin_ia32_storedquqi256_mask", + "llvm.x86.avx512.mask.storeu.w.128" => "__builtin_ia32_storedquhi128_mask", + "llvm.x86.avx512.mask.storeu.b.128" => "__builtin_ia32_storedquqi128_mask", + "llvm.x86.avx512.mask.expand.load.w.512" => "__builtin_ia32_expandloadhi512_mask", + "llvm.x86.avx512.mask.expand.load.w.256" => "__builtin_ia32_expandloadhi256_mask", + "llvm.x86.avx512.mask.expand.load.w.128" => "__builtin_ia32_expandloadhi128_mask", + "llvm.x86.avx512.mask.expand.load.b.512" => "__builtin_ia32_expandloadqi512_mask", + "llvm.x86.avx512.mask.expand.load.b.256" => "__builtin_ia32_expandloadqi256_mask", + "llvm.x86.avx512.mask.expand.load.b.128" => "__builtin_ia32_expandloadqi128_mask", + "llvm.x86.avx512.sitofp.round.v8f64.v8i64" => "__builtin_ia32_cvtqq2pd512_mask", + "llvm.x86.avx512.sitofp.round.v2f64.v2i64" => "__builtin_ia32_cvtqq2pd128_mask", + "llvm.x86.avx512.sitofp.round.v4f64.v4i64" => "__builtin_ia32_cvtqq2pd256_mask", + "llvm.x86.avx512.sitofp.round.v8f32.v8i64" => 
"__builtin_ia32_cvtqq2ps512_mask", + "llvm.x86.avx512.sitofp.round.v4f32.v4i64" => "__builtin_ia32_cvtqq2ps256_mask", + "llvm.x86.avx512.uitofp.round.v8f64.v8u64" => "__builtin_ia32_cvtuqq2pd512_mask", + "llvm.x86.avx512.uitofp.round.v2f64.v2u64" => "__builtin_ia32_cvtuqq2pd128_mask", + "llvm.x86.avx512.uitofp.round.v4f64.v4u64" => "__builtin_ia32_cvtuqq2pd256_mask", + "llvm.x86.avx512.uitofp.round.v8f32.v8u64" => "__builtin_ia32_cvtuqq2ps512_mask", + "llvm.x86.avx512.uitofp.round.v4f32.v4u64" => "__builtin_ia32_cvtuqq2ps256_mask", + "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask_round", + "llvm.x86.avx512.mask.reduce.ps.512" => "__builtin_ia32_reduceps512_mask_round", + "llvm.x86.avx512.mask.reduce.sd" => "__builtin_ia32_reducesd_mask_round", + "llvm.x86.avx512.mask.reduce.ss" => "__builtin_ia32_reducess_mask_round", + "llvm.x86.avx512.mask.loadu.d.256" => "__builtin_ia32_loaddqusi256_mask", + "llvm.x86.avx512.mask.loadu.q.256" => "__builtin_ia32_loaddqudi256_mask", + "llvm.x86.avx512.mask.loadu.ps.256" => "__builtin_ia32_loadups256_mask", + "llvm.x86.avx512.mask.loadu.pd.256" => "__builtin_ia32_loadupd256_mask", + "llvm.x86.avx512.mask.loadu.d.128" => "__builtin_ia32_loaddqusi128_mask", + "llvm.x86.avx512.mask.loadu.q.128" => "__builtin_ia32_loaddqudi128_mask", + "llvm.x86.avx512.mask.loadu.ps.128" => "__builtin_ia32_loadups128_mask", + "llvm.x86.avx512.mask.loadu.pd.128" => "__builtin_ia32_loadupd128_mask", + "llvm.x86.avx512.mask.load.d.512" => "__builtin_ia32_movdqa32load512_mask", + "llvm.x86.avx512.mask.load.q.512" => "__builtin_ia32_movdqa64load512_mask", + "llvm.x86.avx512.mask.load.ps.512" => "__builtin_ia32_loadaps512_mask", + "llvm.x86.avx512.mask.load.pd.512" => "__builtin_ia32_loadapd512_mask", + "llvm.x86.avx512.mask.load.d.256" => "__builtin_ia32_movdqa32load256_mask", + "llvm.x86.avx512.mask.load.q.256" => "__builtin_ia32_movdqa64load256_mask", + "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask_round", + "llvm.x86.avx512fp16.vcomi.sh" => "__builtin_ia32_cmpsh_mask_round", + "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512_mask_round", + "llvm.x86.avx512fp16.sub.ph.512" => "__builtin_ia32_subph512_mask_round", + "llvm.x86.avx512fp16.mul.ph.512" => "__builtin_ia32_mulph512_mask_round", + "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512_mask_round", + "llvm.x86.avx512fp16.mask.vfmul.cph.512" => "__builtin_ia32_vfmulcph512_mask_round", + "llvm.x86.avx512fp16.mask.vfmul.csh" => "__builtin_ia32_vfmulcsh_mask_round", + "llvm.x86.avx512fp16.mask.vfcmul.cph.512" => "__builtin_ia32_vfcmulcph512_mask_round", + "llvm.x86.avx512fp16.mask.vfcmul.csh" => "__builtin_ia32_vfcmulcsh_mask_round", + "llvm.x86.avx512fp16.mask.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_mask3_round", + "llvm.x86.avx512fp16.maskz.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_maskz_round", + "llvm.x86.avx512fp16.mask.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_mask3_round", + "llvm.x86.avx512fp16.maskz.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_maskz_round", + "llvm.x86.avx512fp16.mask.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_mask3_round", + "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_maskz_round", + "llvm.x86.avx512fp16.mask.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_mask3_round", + "llvm.x86.avx512fp16.maskz.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_maskz_round", + "llvm.x86.avx512fp16.vfmadd.ph.512" => "__builtin_ia32_vfmaddph512_mask", + "llvm.x86.avx512fp16.vcvtsi642sh" => 
"__builtin_ia32_vcvtsi2sh64_round", + "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi2sh64_round", + "llvm.x86.avx512fp16.vcvtsh2si64" => "__builtin_ia32_vcvtsh2si64_round", + "llvm.x86.avx512fp16.vcvtsh2usi64" => "__builtin_ia32_vcvtsh2usi64_round", + "llvm.x86.avx512fp16.vcvttsh2si64" => "__builtin_ia32_vcvttsh2si64_round", + "llvm.x86.avx512fp16.vcvttsh2usi64" => "__builtin_ia32_vcvttsh2usi64_round", + "llvm.x86.avx512.mask.load.ps.256" => "__builtin_ia32_loadaps256_mask", + "llvm.x86.avx512.mask.load.pd.256" => "__builtin_ia32_loadapd256_mask", + "llvm.x86.avx512.mask.load.d.128" => "__builtin_ia32_movdqa32load128_mask", + "llvm.x86.avx512.mask.load.q.128" => "__builtin_ia32_movdqa64load128_mask", + "llvm.x86.avx512.mask.load.ps.128" => "__builtin_ia32_loadaps128_mask", + "llvm.x86.avx512.mask.load.pd.128" => "__builtin_ia32_loadapd128_mask", + "llvm.x86.avx512.mask.storeu.d.256" => "__builtin_ia32_storedqusi256_mask", + "llvm.x86.avx512.mask.storeu.q.256" => "__builtin_ia32_storedqudi256_mask", + "llvm.x86.avx512.mask.storeu.ps.256" => "__builtin_ia32_storeups256_mask", + "llvm.x86.avx512.mask.storeu.pd.256" => "__builtin_ia32_storeupd256_mask", + "llvm.x86.avx512.mask.storeu.d.128" => "__builtin_ia32_storedqusi128_mask", + "llvm.x86.avx512.mask.storeu.q.128" => "__builtin_ia32_storedqudi128_mask", + "llvm.x86.avx512.mask.storeu.ps.128" => "__builtin_ia32_storeups128_mask", + "llvm.x86.avx512.mask.storeu.pd.128" => "__builtin_ia32_storeupd128_mask", + "llvm.x86.avx512.mask.store.d.512" => "__builtin_ia32_movdqa32store512_mask", + "llvm.x86.avx512.mask.store.q.512" => "__builtin_ia32_movdqa64store512_mask", + "llvm.x86.avx512.mask.store.ps.512" => "__builtin_ia32_storeaps512_mask", + "llvm.x86.avx512.mask.store.pd.512" => "__builtin_ia32_storeapd512_mask", + "llvm.x86.avx512.mask.store.d.256" => "__builtin_ia32_movdqa32store256_mask", + "llvm.x86.avx512.mask.store.q.256" => "__builtin_ia32_movdqa64store256_mask", + "llvm.x86.avx512.mask.store.ps.256" => "__builtin_ia32_storeaps256_mask", + "llvm.x86.avx512.mask.store.pd.256" => "__builtin_ia32_storeapd256_mask", + "llvm.x86.avx512.mask.store.d.128" => "__builtin_ia32_movdqa32store128_mask", + "llvm.x86.avx512.mask.store.q.128" => "__builtin_ia32_movdqa64store128_mask", + "llvm.x86.avx512.mask.store.ps.128" => "__builtin_ia32_storeaps128_mask", + "llvm.x86.avx512.mask.store.pd.128" => "__builtin_ia32_storeapd128_mask", + "llvm.x86.avx512fp16.vfmadd.f16" => "__builtin_ia32_vfmaddsh3_mask", + "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph128_mask", + "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256_mask", + "llvm.x86.avx512fp16.vfmaddsub.ph.512" => "__builtin_ia32_vfmaddsubph512_mask", + "llvm.x86.avx512fp16.sqrt.ph.512" => "__builtin_ia32_sqrtph512_mask_round", + "llvm.x86.avx512fp16.mask.sqrt.sh" => "__builtin_ia32_sqrtsh_mask_round", + "llvm.x86.avx512fp16.max.ph.128" => "__builtin_ia32_maxph128_mask", + "llvm.x86.avx512fp16.max.ph.256" => "__builtin_ia32_maxph256_mask", + "llvm.x86.avx512fp16.max.ph.512" => "__builtin_ia32_maxph512_mask_round", + "llvm.x86.avx512fp16.min.ph.128" => "__builtin_ia32_minph128_mask", + "llvm.x86.avx512fp16.min.ph.256" => "__builtin_ia32_minph256_mask", + "llvm.x86.avx512fp16.min.ph.512" => "__builtin_ia32_minph512_mask_round", + "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh_mask_round", + "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph128_mask", + "llvm.x86.avx512fp16.mask.rndscale.ph.256" => 
"__builtin_ia32_rndscaleph256_mask", + "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph512_mask_round", + "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask_round", + "llvm.x86.avx512fp16.mask.reduce.ph.512" => "__builtin_ia32_reduceph512_mask_round", + "llvm.x86.avx512fp16.mask.reduce.sh" => "__builtin_ia32_reducesh_mask_round", + "llvm.x86.avx512.sitofp.round.v8f16.v8i16" => "__builtin_ia32_vcvtw2ph128_mask", + "llvm.x86.avx512.sitofp.round.v16f16.v16i16" => "__builtin_ia32_vcvtw2ph256_mask", + "llvm.x86.avx512.sitofp.round.v32f16.v32i16" => "__builtin_ia32_vcvtw2ph512_mask_round", + "llvm.x86.avx512.uitofp.round.v8f16.v8u16" => "__builtin_ia32_vcvtuw2ph128_mask", + "llvm.x86.avx512.uitofp.round.v16f16.v16u16" => "__builtin_ia32_vcvtuw2ph256_mask", + "llvm.x86.avx512.uitofp.round.v32f16.v32u16" => "__builtin_ia32_vcvtuw2ph512_mask_round", + "llvm.x86.avx512.sitofp.round.v8f16.v8i32" => "__builtin_ia32_vcvtdq2ph256_mask", + "llvm.x86.avx512.sitofp.round.v16f16.v16i32" => "__builtin_ia32_vcvtdq2ph512_mask_round", + "llvm.x86.avx512fp16.vcvtsi2sh" => "__builtin_ia32_vcvtsi2sh32_round", + "llvm.x86.avx512.uitofp.round.v8f16.v8u32" => "__builtin_ia32_vcvtudq2ph256_mask", + "llvm.x86.avx512.uitofp.round.v16f16.v16u32" => "__builtin_ia32_vcvtudq2ph512_mask_round", + "llvm.x86.avx512fp16.vcvtusi2sh" => "__builtin_ia32_vcvtusi2sh32_round", + "llvm.x86.avx512.sitofp.round.v8f16.v8i64" => "__builtin_ia32_vcvtqq2ph512_mask_round", + "llvm.x86.avx512.uitofp.round.v8f16.v8u64" => "__builtin_ia32_vcvtuqq2ph512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtpd2ph.512" => "__builtin_ia32_vcvtpd2ph512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtph2uw.512" => "__builtin_ia32_vcvtph2uw512_mask_round", + "llvm.x86.avx512fp16.mask.vcvttph2w.512" => "__builtin_ia32_vcvttph2w512_mask_round", + "llvm.x86.avx512fp16.mask.vcvttph2uw.512" => "__builtin_ia32_vcvttph2uw512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtph2dq.512" => "__builtin_ia32_vcvtph2dq512_mask_round", + "llvm.x86.avx512fp16.vcvtsh2si32" => "__builtin_ia32_vcvtsh2si32_round", + "llvm.x86.avx512fp16.mask.vcvtph2udq.512" => "__builtin_ia32_vcvtph2udq512_mask_round", + "llvm.x86.avx512fp16.vcvtsh2usi32" => "__builtin_ia32_vcvtsh2usi32_round", + "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask_round", + "llvm.x86.avx512fp16.vcvttsh2si32" => "__builtin_ia32_vcvttsh2si32_round", + "llvm.x86.avx512fp16.mask.vcvttph2udq.512" => "__builtin_ia32_vcvttph2udq512_mask_round", + "llvm.x86.avx512fp16.vcvttsh2usi32" => "__builtin_ia32_vcvttsh2usi32_round", + "llvm.x86.avx512fp16.mask.vcvtph2qq.512" => "__builtin_ia32_vcvtph2qq512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtph2uqq.512" => "__builtin_ia32_vcvtph2uqq512_mask_round", + "llvm.x86.avx512fp16.mask.vcvttph2qq.512" => "__builtin_ia32_vcvttph2qq512_mask_round", + "llvm.x86.avx512fp16.mask.vcvttph2uqq.512" => "__builtin_ia32_vcvttph2uqq512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtph2psx.512" => "__builtin_ia32_vcvtph2psx512_mask_round", + "llvm.x86.avx512fp16.mask.vcvtph2pd.512" => "__builtin_ia32_vcvtph2pd512_mask_round", + "llvm.x86.avx512fp16.mask.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_mask3", + "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask3", + "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask3", + "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => 
"__builtin_ia32_vfmaddcph128_mask3", + + // TODO: support the tile builtins: + "llvm.x86.ldtilecfg" => "__builtin_trap", + "llvm.x86.sttilecfg" => "__builtin_trap", + "llvm.x86.tileloadd64" => "__builtin_trap", + "llvm.x86.tilerelease" => "__builtin_trap", + "llvm.x86.tilestored64" => "__builtin_trap", + "llvm.x86.tileloaddt164" => "__builtin_trap", + "llvm.x86.tilezero" => "__builtin_trap", + "llvm.x86.tdpbf16ps" => "__builtin_trap", + "llvm.x86.tdpbssd" => "__builtin_trap", + "llvm.x86.tdpbsud" => "__builtin_trap", + "llvm.x86.tdpbusd" => "__builtin_trap", + "llvm.x86.tdpbuud" => "__builtin_trap", + "llvm.x86.tdpfp16ps" => "__builtin_trap", + "llvm.x86.tcmmimfp16ps" => "__builtin_trap", + "llvm.x86.tcmmrlfp16ps" => "__builtin_trap", // NOTE: this file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py _ => include!("archs.rs"), diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 4fd033255fe..b0298a35cb0 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -7,6 +7,7 @@ use std::iter; #[cfg(feature = "master")] use gccjit::FunctionType; use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp}; +use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::base::wants_msvc_seh; use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::errors::InvalidMonomorphization; @@ -17,18 +18,17 @@ use rustc_codegen_ssa::traits::{ }; #[cfg(feature = "master")] use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, MiscCodegenMethods}; -use rustc_codegen_ssa::MemFlags; use rustc_middle::bug; use rustc_middle::ty::layout::LayoutOf; #[cfg(feature = "master")] use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::{self, Instance, Ty}; -use rustc_span::{sym, Span, Symbol}; -use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; +use rustc_span::{Span, Symbol, sym}; use rustc_target::abi::HasDataLayout; +use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; +use rustc_target::spec::PanicStrategy; #[cfg(feature = "master")] use rustc_target::spec::abi::Abi; -use rustc_target::spec::PanicStrategy; #[cfg(feature = "master")] use crate::abi::FnAbiGccExt; @@ -66,6 +66,9 @@ fn get_simple_intrinsic<'gcc, 'tcx>( sym::log2f64 => "log2", sym::fmaf32 => "fmaf", sym::fmaf64 => "fma", + // FIXME: calling `fma` from libc without FMA target feature uses expensive sofware emulation + sym::fmuladdf32 => "fmaf", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f32 + sym::fmuladdf64 => "fma", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f64 sym::fabsf32 => "fabsf", sym::fabsf64 => "fabs", sym::minnumf32 => "fminf", @@ -127,20 +130,13 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc // https://github.com/rust-lang/rust-clippy/issues/12497 // and leave `else if use_integer_compare` to be placed "as is". #[allow(clippy::suspicious_else_formatting)] - let llval = match name { + let value = match name { _ if simple.is_some() => { - // FIXME(antoyo): remove this cast when the API supports function. 
- let func = unsafe { - std::mem::transmute::<Function<'gcc>, RValue<'gcc>>(simple.expect("simple")) - }; - self.call( - self.type_void(), - None, - None, + let func = simple.expect("simple function"); + self.cx.context.new_call( + self.location, func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), - None, - None, ) } sym::likely => self.expect(args[0].immediate(), true), @@ -298,13 +294,13 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc } sym::raw_eq => { - use rustc_target::abi::Abi::*; + use rustc_abi::BackendRepr::*; let tp_ty = fn_args.type_at(0); let layout = self.layout_of(tp_ty).layout; - let _use_integer_compare = match layout.abi() { + let _use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, Uninhabited | Vector { .. } => false, - Aggregate { .. } => { + Memory { .. } => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), // so we re-use that same threshold here. @@ -383,7 +379,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc _ if name_str.starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { - Ok(llval) => llval, + Ok(value) => value, Err(()) => return Ok(()), } } @@ -396,9 +392,9 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc if let PassMode::Cast { cast: ref ty, .. } = fn_abi.ret.mode { let ptr_llty = self.type_ptr_to(ty.gcc_type(self)); let ptr = self.pointercast(result.val.llval, ptr_llty); - self.store(llval, ptr, result.val.align); + self.store(value, ptr, result.val.align); } else { - OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) + OperandRef::from_immediate_or_packed_pair(self, value, result.layout) .val .store(self, result); } diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index 28f6a0821fb..43dbfafa871 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -16,7 +16,7 @@ use rustc_hir as hir; use rustc_middle::mir::BinOp; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; -use rustc_span::{sym, Span, Symbol}; +use rustc_span::{Span, Symbol, sym}; use rustc_target::abi::{Align, Size}; use crate::builder::Builder; @@ -60,10 +60,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let arg_tys = sig.inputs(); if name == sym::simd_select_bitmask { - require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] } - ); + require_simd!(arg_tys[1], InvalidMonomorphization::SimdArgument { + span, + name, + ty: arg_tys[1] + }); let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); let expected_int_bits = (len.max(8) - 1).next_power_of_two(); @@ -75,8 +76,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), ty::Array(elem, len) if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8)) - && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) - == Some(expected_bytes) => + && len + .try_to_target_usize(bx.tcx) + .expect("expected monomorphic const in codegen") + == expected_bytes => { let place = PlaceRef::alloca(bx, args[0].layout); args[0].val.store(bx, place); @@ -135,17 +138,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty 
}); let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - } - ); + require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + }); require!( bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty } @@ -200,7 +200,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bx.context.new_bitcast(None, shuffled, v_type) }; - if name == sym::simd_bswap || name == sym::simd_bitreverse { + if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop) { require!( bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem } @@ -211,6 +211,22 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Ok(simd_bswap(bx, args[0].immediate())); } + let simd_ctpop = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> { + let mut vector_elements = vec![]; + let elem_ty = bx.element_type(llret_ty); + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64); + let element = bx.extract_element(vector, index).to_rvalue(); + let result = bx.context.new_cast(None, bx.pop_count(element), elem_ty); + vector_elements.push(result); + } + bx.context.new_rvalue_from_vector(None, llret_ty, &vector_elements) + }; + + if name == sym::simd_ctpop { + return Ok(simd_ctpop(bx, args[0].immediate())); + } + // We use a different algorithm from non-vector bitreverse to take advantage of most // processors' vector shuffle units. It works like this: // 1. Generate pre-reversed low and high nibbles as a vector. @@ -251,17 +267,23 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let lo_nibble = bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &lo_nibble_elements); - let mask = bx.context.new_rvalue_from_vector( - None, - long_byte_vector_type, - &vec![bx.context.new_rvalue_from_int(bx.u8_type, 0x0f); byte_vector_type_size as _], - ); - - let four_vec = bx.context.new_rvalue_from_vector( - None, - long_byte_vector_type, - &vec![bx.context.new_rvalue_from_int(bx.u8_type, 4); byte_vector_type_size as _], - ); + let mask = bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &vec![ + bx.context + .new_rvalue_from_int( + bx.u8_type, 0x0f + ); + byte_vector_type_size + as _ + ]); + + let four_vec = bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &vec![ + bx.context + .new_rvalue_from_int( + bx.u8_type, 4 + ); + byte_vector_type_size + as _ + ]); // Step 2: Byte-swap the input. 
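There is no single GCC vector builtin behind the new `simd_ctpop` arm, so the closure above scalarises: it extracts each lane, applies the scalar popcount, casts back to the element type, and rebuilds the vector. The same transformation over plain slices:

```rust
// Lane-by-lane model of the simd_ctpop lowering: extract, popcount,
// rebuild the vector.
fn simd_ctpop(v: &[u32]) -> Vec<u32> {
    v.iter().map(|x| x.count_ones()).collect()
}

fn main() {
    assert_eq!(simd_ctpop(&[0b1011, 0, u32::MAX]), vec![3, 0, 32]);
}
```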
let swapped = simd_bswap(bx, args[0].immediate()); @@ -364,14 +386,21 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - out_len == n, - InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len } - ); - require!( - in_elem == out_ty, - InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } - ); + require!(out_len == n, InvalidMonomorphization::ReturnLength { + span, + name, + in_len: n, + ret_ty, + out_len + }); + require!(in_elem == out_ty, InvalidMonomorphization::ReturnElement { + span, + name, + in_elem, + in_ty, + ret_ty, + out_ty + }); let vector = args[2].immediate(); @@ -380,16 +409,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( #[cfg(feature = "master")] if name == sym::simd_insert { - require!( - in_elem == arg_tys[2], - InvalidMonomorphization::InsertedType { - span, - name, - in_elem, - in_ty, - out_ty: arg_tys[2] - } - ); + require!(in_elem == arg_tys[2], InvalidMonomorphization::InsertedType { + span, + name, + in_elem, + in_ty, + out_ty: arg_tys[2] + }); let vector = args[0].immediate(); let index = args[1].immediate(); let value = args[2].immediate(); @@ -403,10 +429,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( #[cfg(feature = "master")] if name == sym::simd_extract { - require!( - ret_ty == in_elem, - InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } - ); + require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { + span, + name, + in_elem, + in_ty, + ret_ty + }); let vector = args[0].immediate(); return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue()); } @@ -414,15 +443,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if name == sym::simd_select { let m_elem_ty = in_elem; let m_len = in_len; - require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] } - ); + require_simd!(arg_tys[1], InvalidMonomorphization::SimdArgument { + span, + name, + ty: arg_tys[1] + }); let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); - require!( - m_len == v_len, - InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } - ); + require!(m_len == v_len, InvalidMonomorphization::MismatchedLengths { + span, + name, + m_len, + v_len + }); match *m_elem_ty.kind() { ty::Int(_) => {} _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }), @@ -434,27 +466,25 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - } - ); + require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + }); match *in_elem.kind() { ty::RawPtr(p_ty, _) => { let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| { bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty) }); - require!( - metadata.is_unit(), - InvalidMonomorphization::CastFatPointer { span, name, ty: in_elem } - ); + require!(metadata.is_unit(), InvalidMonomorphization::CastWidePointer { + span, + name, + ty: in_elem + }); } _ => { return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem }) @@ -465,10 
+495,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| { bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty) }); - require!( - metadata.is_unit(), - InvalidMonomorphization::CastFatPointer { span, name, ty: out_elem } - ); + require!(metadata.is_unit(), InvalidMonomorphization::CastWidePointer { + span, + name, + ty: out_elem + }); } _ => { return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem }) @@ -491,17 +522,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - } - ); + require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + }); match *in_elem.kind() { ty::RawPtr(_, _) => {} @@ -530,17 +558,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - } - ); + require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + }); match *in_elem.kind() { ty::Uint(ty::UintTy::Usize) => {} @@ -569,17 +594,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( if name == sym::simd_cast || name == sym::simd_as { require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - InvalidMonomorphization::ReturnLengthInputType { - span, - name, - in_len, - in_ty, - ret_ty, - out_len - } - ); + require!(in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + }); // casting cares about nominal type, not just structural type if in_elem == out_elem { return Ok(args[0].immediate()); @@ -605,17 +627,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( match (in_style, out_style) { (Style::Unsupported, Style::Unsupported) => { - require!( - false, - InvalidMonomorphization::UnsupportedCast { - span, - name, - in_ty, - in_elem, - ret_ty, - out_elem - } - ); + require!(false, InvalidMonomorphization::UnsupportedCast { + span, + name, + in_ty, + in_elem, + ret_ty, + out_elem + }); } _ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)), } @@ -679,8 +698,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } ty::Array(elem, len) if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8)) - && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) - == Some(expected_bytes) => + && len + .try_to_target_usize(bx.tcx) + .expect("expected monomorphic const in codegen") + == expected_bytes => { // Zero-extend iN to the array length: let ze = bx.zext(result, bx.type_ix(expected_bytes * 8)); @@ -717,11 +738,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Err(()); }}; } - let (elem_ty_str, elem_ty) = if let ty::Float(ref f) = *in_elem.kind() { + let (elem_ty_str, elem_ty, cast_type) = if let ty::Float(ref f) = *in_elem.kind() { let elem_ty = bx.cx.type_float_from_ty(*f); match f.bit_width() { - 32 => 
("f", elem_ty), - 64 => ("", elem_ty), + 16 => ("", elem_ty, Some(bx.cx.double_type)), + 32 => ("f", elem_ty, None), + 64 => ("", elem_ty, None), _ => { return_error!(InvalidMonomorphization::FloatingPointVector { span, @@ -757,10 +779,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }), }; let builtin_name = format!("{}{}", intr_name, elem_ty_str); - let funcs = bx.cx.functions.borrow(); - let function = funcs - .get(&builtin_name) - .unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name)); + let function = bx.context.get_builtin_function(builtin_name); // TODO(antoyo): add platform-specific behavior here for architectures that have these // intrinsics as instructions (for instance, gpus) @@ -768,17 +787,28 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( for i in 0..in_len { let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64); // we have to treat fpowi specially, since fpowi's second argument is always an i32 - let arguments = if name == sym::simd_fpowi { - vec![ + let mut arguments = vec![]; + if name == sym::simd_fpowi { + arguments = vec![ bx.extract_element(args[0].immediate(), index).to_rvalue(), args[1].immediate(), - ] + ]; } else { - args.iter() - .map(|arg| bx.extract_element(arg.immediate(), index).to_rvalue()) - .collect() + for arg in args { + let mut element = bx.extract_element(arg.immediate(), index).to_rvalue(); + // FIXME: it would probably be better to not have casts here and use the proper + // instructions. + if let Some(typ) = cast_type { + element = bx.context.new_cast(None, element, typ); + } + arguments.push(element); + } }; - vector_elements.push(bx.context.new_call(None, *function, &arguments)); + let mut result = bx.context.new_call(None, function, &arguments); + if cast_type.is_some() { + result = bx.context.new_cast(None, result, elem_ty); + } + vector_elements.push(result); } let c = bx.context.new_rvalue_from_vector(None, vec_ty, &vector_elements); Ok(c) @@ -880,47 +910,45 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // All types must be simd vector types require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty }); - require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] } - ); - require_simd!( - arg_tys[2], - InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] } - ); + require_simd!(arg_tys[1], InvalidMonomorphization::SimdSecond { + span, + name, + ty: arg_tys[1] + }); + require_simd!(arg_tys[2], InvalidMonomorphization::SimdThird { + span, + name, + ty: arg_tys[2] + }); require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); // Of the same length: let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - InvalidMonomorphization::SecondArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[1], - out_len - } - ); - require!( - in_len == out_len2, - InvalidMonomorphization::ThirdArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[2], - out_len: out_len2 - } - ); + require!(in_len == out_len, InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len + }); + require!(in_len == out_len2, InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: out_len2 + }); // The return type must match 
the first argument type - require!( - ret_ty == in_ty, - InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty } - ); + require!(ret_ty == in_ty, InvalidMonomorphization::ExpectedReturnType { + span, + name, + in_ty, + ret_ty + }); // This counts how many pointers fn ptr_count(t: Ty<'_>) -> usize { @@ -947,18 +975,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( (ptr_count(element_ty1), non_ptr(element_ty1)) } _ => { - require!( - false, - InvalidMonomorphization::ExpectedElementType { - span, - name, - expected_element: element_ty1, - second_arg: arg_tys[1], - in_elem, - in_ty, - mutability: ExpectedPointerMutability::Not, - } - ); + require!(false, InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Not, + }); unreachable!(); } }; @@ -971,15 +996,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( match *element_ty2.kind() { ty::Int(_) => (), _ => { - require!( - false, - InvalidMonomorphization::ThirdArgElementType { - span, - name, - expected_element: element_ty2, - third_arg: arg_tys[2] - } - ); + require!(false, InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + }); } } @@ -1003,40 +1025,36 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // All types must be simd vector types require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty }); - require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] } - ); - require_simd!( - arg_tys[2], - InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] } - ); + require_simd!(arg_tys[1], InvalidMonomorphization::SimdSecond { + span, + name, + ty: arg_tys[1] + }); + require_simd!(arg_tys[2], InvalidMonomorphization::SimdThird { + span, + name, + ty: arg_tys[2] + }); // Of the same length: let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); - require!( - in_len == element_len1, - InvalidMonomorphization::SecondArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[1], - out_len: element_len1 - } - ); - require!( - in_len == element_len2, - InvalidMonomorphization::ThirdArgumentLength { - span, - name, - in_len, - in_ty, - arg_ty: arg_tys[2], - out_len: element_len2 - } - ); + require!(in_len == element_len1, InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len: element_len1 + }); + require!(in_len == element_len2, InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: element_len2 + }); // This counts how many pointers fn ptr_count(t: Ty<'_>) -> usize { @@ -1064,18 +1082,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( (ptr_count(element_ty1), non_ptr(element_ty1)) } _ => { - require!( - false, - InvalidMonomorphization::ExpectedElementType { - span, - name, - expected_element: element_ty1, - second_arg: arg_tys[1], - in_elem, - in_ty, - mutability: ExpectedPointerMutability::Mut, - } - ); + require!(false, InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Mut, + }); unreachable!(); } }; @@ -1087,15 +1102,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( match *element_ty2.kind() { ty::Int(_) => (), _ => { - 
require!( - false, - InvalidMonomorphization::ThirdArgElementType { - span, - name, - expected_element: element_ty2, - third_arg: arg_tys[2] - } - ); + require!(false, InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + }); } } @@ -1262,10 +1274,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( ($name:ident : $vec_op:expr, $float_reduce:ident, $ordered:expr, $op:ident, $identity:expr) => { if name == sym::$name { - require!( - ret_ty == in_elem, - InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } - ); + require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { + span, + name, + in_elem, + in_ty, + ret_ty + }); return match *in_elem.kind() { ty::Int(_) | ty::Uint(_) => { let r = bx.vector_reduce_op(args[0].immediate(), $vec_op); @@ -1331,10 +1346,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( macro_rules! minmax_red { ($name:ident: $int_red:ident, $float_red:ident) => { if name == sym::$name { - require!( - ret_ty == in_elem, - InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } - ); + require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { + span, + name, + in_elem, + in_ty, + ret_ty + }); return match *in_elem.kind() { ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())), ty::Float(_) => Ok(bx.$float_red(args[0].immediate())), @@ -1358,10 +1376,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( ($name:ident : $op:expr, $boolean:expr) => { if name == sym::$name { let input = if !$boolean { - require!( - ret_ty == in_elem, - InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } - ); + require!(ret_ty == in_elem, InvalidMonomorphization::ReturnType { + span, + name, + in_elem, + in_ty, + ret_ty + }); args[0].immediate() } else { match *in_elem.kind() { diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 4de671ac4a0..f70dc94b267 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -32,6 +32,7 @@ extern crate tempfile; extern crate tracing; // The rustc crates we need +extern crate rustc_abi; extern crate rustc_apfloat; extern crate rustc_ast; extern crate rustc_attr; @@ -107,10 +108,10 @@ use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; use rustc_middle::util::Providers; -use rustc_session::config::{Lto, OptLevel, OutputFilenames}; use rustc_session::Session; -use rustc_span::fatal_error::FatalError; +use rustc_session::config::{Lto, OptLevel, OutputFilenames}; use rustc_span::Symbol; +use rustc_span::fatal_error::FatalError; use tempfile::TempDir; use crate::back::lto::ModuleBuffer; @@ -363,7 +364,7 @@ impl Deref for SyncContext { unsafe impl Send for SyncContext {} // FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". -// TODO: disable it here by returing false in CodegenBackend::supports_parallel(). +// TODO: disable it here by returning false in CodegenBackend::supports_parallel(). unsafe impl Sync for SyncContext {} impl WriteBackendMethods for GccCodegenBackend { @@ -490,8 +491,9 @@ pub fn target_features( ) -> Vec<Symbol> { // TODO(antoyo): use global_gcc_features. 
sess.target - .supported_target_features() + .rust_target_features() .iter() + .filter(|(_, gate, _)| gate.is_supported()) .filter_map(|&(feature, gate, _)| { if sess.is_nightly_build() || allow_unstable || gate.is_stable() { Some(feature) diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs index 8a8b748750c..b7b282bf2a6 100644 --- a/compiler/rustc_codegen_gcc/src/mono_item.rs +++ b/compiler/rustc_codegen_gcc/src/mono_item.rs @@ -37,7 +37,7 @@ impl<'gcc, 'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section); #[cfg(feature = "master")] - global.add_string_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility))); + global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility))); // TODO(antoyo): set linkage. self.instances.borrow_mut().insert(instance, global); diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index cb45bbde2c2..0efdf36da48 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -1,6 +1,9 @@ use std::fmt::Write; use gccjit::{Struct, Type}; +use rustc_abi as abi; +use rustc_abi::Primitive::*; +use rustc_abi::{BackendRepr, FieldsShape, Integer, PointeeInfo, Size, Variants}; use rustc_codegen_ssa::traits::{ BaseTypeCodegenMethods, DerivedTypeCodegenMethods, LayoutTypeCodegenMethods, }; @@ -8,11 +11,8 @@ use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt}; +use rustc_target::abi::TyAbiInterface; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; -use rustc_target::abi::{ - self, Abi, FieldsShape, Float, Int, Integer, PointeeInfo, Pointer, Size, TyAbiInterface, - Variants, -}; use crate::abi::{FnAbiGcc, FnAbiGccExt, GccType}; use crate::context::CodegenCx; @@ -60,9 +60,9 @@ fn uncached_gcc_type<'gcc, 'tcx>( layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>, ) -> Type<'gcc> { - match layout.abi { - Abi::Scalar(_) => bug!("handled elsewhere"), - Abi::Vector { ref element, count } => { + match layout.backend_repr { + BackendRepr::Scalar(_) => bug!("handled elsewhere"), + BackendRepr::Vector { ref element, count } => { let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); let element = // NOTE: gcc doesn't allow pointer types in vectors. @@ -74,7 +74,7 @@ fn uncached_gcc_type<'gcc, 'tcx>( }; return cx.context.new_vector_type(element, count); } - Abi::ScalarPair(..) => { + BackendRepr::ScalarPair(..) => { return cx.type_struct( &[ layout.scalar_pair_element_gcc_type(cx, 0), @@ -83,7 +83,7 @@ fn uncached_gcc_type<'gcc, 'tcx>( false, ); } - Abi::Uninhabited | Abi::Aggregate { .. } => {} + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} } let name = match *layout.ty.kind() { @@ -176,16 +176,21 @@ pub trait LayoutGccExt<'tcx> { impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_immediate(&self) -> bool { - match self.abi { - Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true, + BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. 
} => { + false + } } } fn is_gcc_scalar_pair(&self) -> bool { - match self.abi { - Abi::ScalarPair(..) => true, - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::ScalarPair(..) => true, + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::Vector { .. } + | BackendRepr::Memory { .. } => false, } } @@ -197,7 +202,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. /// If the type is an unsized struct, the regular layout is generated, - /// with the inner-most trailing unsized field using the "minimal unit" + /// with the innermost trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { @@ -205,9 +210,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! // In other words, this should generally not look at the type at all, but only at the // layout. - if let Abi::Scalar(ref scalar) = self.abi { + if let BackendRepr::Scalar(ref scalar) = self.backend_repr { // Use a different cache for scalars because pointers to DSTs - // can be either fat or thin (data pointers of fat pointers). + // can be either wide or thin (data pointers of wide pointers). if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) { return ty; } @@ -261,7 +266,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { } fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { - if let Abi::Scalar(ref scalar) = self.abi { + if let BackendRepr::Scalar(ref scalar) = self.backend_repr { if scalar.is_bool() { return cx.type_i1(); } @@ -299,8 +304,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! // In other words, this should generally not look at the type at all, but only at the // layout. - let (a, b) = match self.abi { - Abi::ScalarPair(ref a, ref b) => (a, b), + let (a, b) = match self.backend_repr { + BackendRepr::ScalarPair(ref a, ref b) => (a, b), _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), }; let scalar = [a, b][index]; |
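Notes on the changes above, with small plain-Rust models of the semantics involved. All function names, lane counts, and element types in these sketches are illustrative only; they are not part of the patch.

The new simd_ctpop path has no single GCC vector builtin to call, so the closure extracts every lane, applies the scalar popcount builtin, casts the result back to the element type, and rebuilds the vector. A sketch of the per-lane semantics this is meant to produce:

/// Lane-wise population count: each output lane is the number of set
/// bits in the corresponding input lane, kept in the same lane type.
fn simd_ctpop_model(v: [u32; 4]) -> [u32; 4] {
    // The lowering casts the builtin's result back to the element type;
    // the `as` cast mirrors that step.
    v.map(|lane| lane.count_ones() as u32)
}

// e.g. simd_ctpop_model([0b1011, 0, u32::MAX, 1]) == [3, 0, 32, 1]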
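The vector bitreverse comment's steps amount, per lane, to a byte swap followed by a table-driven nibble swap (the mask and four_vec constants supply the 0x0f mask and the 4-bit shift). A scalar model of that algorithm; the real lowering does this with vector shuffles rather than a loop:

/// Reverse a lane's bytes, then reverse the bits of each byte via
/// 4-bit table lookups, matching the lo/hi nibble tables in the patch.
fn bitreverse_u32_model(x: u32) -> u32 {
    // Every 4-bit value with its bits reversed.
    const NIBBLE_REV: [u8; 16] =
        [0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf];
    let mut out = [0u8; 4];
    // Step 2 of the comment: byte-swap (big-endian bytes re-read as little-endian).
    for (i, b) in x.to_be_bytes().into_iter().enumerate() {
        // Look up the reversed low and high nibbles and swap their positions.
        out[i] = (NIBBLE_REV[(b & 0x0f) as usize] << 4) | NIBBLE_REV[(b >> 4) as usize];
    }
    u32::from_le_bytes(out)
}

// Agrees with the scalar primitive: bitreverse_u32_model(1) == 1u32.reverse_bits().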
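simd_insert, simd_extract, and simd_select all lower to direct vector accesses (new_vector_access) plus, for select, a per-lane choice guarded by the signed-integer mask checks above. Sketches of the checked semantics; the sign-bit mask convention here is the usual contract for these intrinsics, not something this hunk spells out:

fn simd_insert_model(mut v: [f32; 4], idx: usize, val: f32) -> [f32; 4] {
    v[idx] = val; // `new_vector_access` used as an lvalue
    v
}

fn simd_extract_model(v: [f32; 4], idx: usize) -> f32 {
    v[idx] // `new_vector_access` used as an rvalue
}

/// Mask lanes must be signed integers and match the operand length
/// (the `m_len == v_len` check); lane i picks a[i] when mask[i] < 0.
fn simd_select_model(mask: [i32; 4], a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
    let mut out = [0u32; 4];
    for i in 0..4 {
        out[i] = if mask[i] < 0 { a[i] } else { b[i] };
    }
    out
}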
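For simd_cast and simd_as, once both element styles are supported the whole operation is a single convert_vector, that is, an ordinary numeric conversion applied lane by lane, with the in_elem == out_elem case returning the operand unchanged. One concrete instance:

/// Lane-wise numeric conversion, one instance of what `convert_vector`
/// performs for `simd_cast`/`simd_as`.
fn simd_cast_model(v: [i32; 4]) -> [f32; 4] {
    v.map(|lane| lane as f32)
}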
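Two details of the float math-intrinsic loop are worth modelling. The new 16-bit arm picks the unsuffixed (f64) builtin and threads cast_type through the loop, because GCC exposes no f16 variants here: each lane is promoted to double, computed, then demoted (the FIXME in the patch notes this is a stopgap). Separately, simd_fpowi keeps a single scalar i32 exponent instead of extracting one per lane. Sketches of both patterns, using f32 lanes only because f16 is not yet stable Rust:

/// Promote-compute-demote, the shape of the new f16 path.
fn simd_fsin_promoted_model(lanes: [f32; 4]) -> [f32; 4] {
    lanes.map(|x| (x as f64).sin() as f32)
}

/// `simd_fpowi`: one scalar exponent shared by every lane.
fn simd_fpowi_model(lanes: [f32; 4], n: i32) -> [f32; 4] {
    lanes.map(|x| x.powi(n))
}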
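The gather checks pin down the expected shape: a pointer-vector second argument whose pointee matches in_elem, a signed-integer mask third argument, and ret_ty equal to in_ty. A model of the semantics being validated (illustrative names and a fixed width; the sign-bit mask convention again follows the intrinsic's usual contract):

/// Model of `simd_gather`: lane i is loaded through `ptrs[i]` when the
/// sign bit of `mask[i]` is set, otherwise `defaults[i]` is kept.
unsafe fn simd_gather_model(
    defaults: [u32; 4],
    ptrs: [*const u32; 4],
    mask: [i32; 4],
) -> [u32; 4] {
    let mut out = defaults;
    for i in 0..4 {
        if mask[i] < 0 {
            // Caller must guarantee the selected pointers are valid reads.
            out[i] = unsafe { *ptrs[i] };
        }
    }
    out
}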
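The reduction macros all start from the same ret_ty == in_elem check and then fold the vector down to a scalar. As a rough model of the two flavours involved, assuming the $ordered flag means the lanes are folded in order through the supplied accumulator, without reassociation:

/// Ordered float reduction: lanes are combined left to right.
fn simd_reduce_add_ordered_model(lanes: [f32; 4], acc: f32) -> f32 {
    lanes.iter().fold(acc, |a, &b| a + b)
}

/// Integer min reduction (minmax_red's integer arm).
fn simd_reduce_min_model(lanes: [i32; 4]) -> i32 {
    lanes.into_iter().min().unwrap()
}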
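In lib.rs, target_features now walks the full rust_target_features() table and must therefore filter out entries this backend cannot enable (gate.is_supported()) before applying the existing stability check. A sketch of that two-stage filter, with plain illustrative types standing in for rustc's Symbol and stability gate:

fn enabled_features<'a>(
    // (name, supported by this backend, stable) stands in for rustc's table.
    table: &'a [(&'a str, bool, bool)],
    is_nightly: bool,
    allow_unstable: bool,
) -> Vec<&'a str> {
    table
        .iter()
        .filter(|&&(_, supported, _)| supported)
        .filter_map(|&(name, _, stable)| {
            if is_nightly || allow_unstable || stable { Some(name) } else { None }
        })
        .collect()
}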
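The type_of.rs hunks are mostly the Abi -> BackendRepr and fat -> wide renames, but the comment about the separate scalar cache is the load-bearing part: a reference to a sized type is a single one-word Scalar, while references to slices or trait objects carry metadata and become ScalarPairs, so the two need different GCC types. A quick runnable reminder of why:

fn main() {
    use std::mem::size_of;
    // Thin pointer: one machine word, BackendRepr::Scalar.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // Wide pointers ("fat" in the old naming): data plus metadata,
    // BackendRepr::ScalarPair.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // ptr + len
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // ptr + vtable
}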
