Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
26 files changed, 18689 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs new file mode 100644 index 00000000000..6fb1cbfad8c --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/abi.rs @@ -0,0 +1,160 @@ +use gccjit::{ToLValue, ToRValue, Type}; +use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods}; +use rustc_data_structures::fx::FxHashSet; +use rustc_middle::bug; +use rustc_middle::ty::Ty; +use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind}; + +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::intrinsic::ArgAbiExt; +use crate::type_of::LayoutGccExt; + +impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { + fn get_param(&mut self, index: usize) -> Self::Value { + let func = self.current_func(); + let param = func.get_param(index as i32); + let on_stack = + if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) { + on_stack_param_indices.contains(&index) + } + else { + false + }; + if on_stack { + param.to_lvalue().get_address(None) + } + else { + param.to_rvalue() + } + } +} + +impl GccType for CastTarget { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> { + let rest_gcc_unit = self.rest.unit.gcc_type(cx); + let (rest_count, rem_bytes) = + if self.rest.unit.size.bytes() == 0 { + (0, 0) + } + else { + (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes()) + }; + + if self.prefix.iter().all(|x| x.is_none()) { + // Simplify to a single unit when there is no prefix and size <= unit size + if self.rest.total <= self.rest.unit.size { + return rest_gcc_unit; + } + + // Simplify to array when all chunks are the same size and type + if rem_bytes == 0 { + return cx.type_array(rest_gcc_unit, rest_count); + } + } + + // Create list of fields in the main structure + let mut args: Vec<_> = self + .prefix + .iter() + .flat_map(|option_reg| { + option_reg.map(|reg| reg.gcc_type(cx)) + }) + .chain((0..rest_count).map(|_| rest_gcc_unit)) + .collect(); + + // Append final integer + if rem_bytes != 0 { + // Only integers can be really split further. + assert_eq!(self.rest.unit.kind, RegKind::Integer); + args.push(cx.type_ix(rem_bytes * 8)); + } + + cx.type_struct(&args, false) + } +} + +pub trait GccType { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>; +} + +impl GccType for Reg { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> { + match self.kind { + RegKind::Integer => cx.type_ix(self.size.bits()), + RegKind::Float => { + match self.size.bits() { + 32 => cx.type_f32(), + 64 => cx.type_f64(), + _ => bug!("unsupported float: {:?}", self), + } + }, + RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()), + } + } +} + +pub trait FnAbiGccExt<'gcc, 'tcx> { + // TODO(antoyo): return a function pointer type instead? + fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>); + fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; +} + +impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { + fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) { + let mut on_stack_param_indices = FxHashSet::default(); + + // This capacity calculation is approximate. + let mut argument_tys = Vec::with_capacity( + self.args.len() + if let PassMode::Indirect { .. 
} = self.ret.mode { 1 } else { 0 }
+        );
+
+        let return_ty =
+            match self.ret.mode {
+                PassMode::Ignore => cx.type_void(),
+                PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+                PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
+                PassMode::Indirect { .. } => {
+                    argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                    cx.type_void()
+                }
+            };
+
+        for arg in self.args.iter() {
+            let arg_ty = match arg.mode {
+                PassMode::Ignore => continue,
+                PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
+                PassMode::Pair(..) => {
+                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
+                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+                    continue;
+                }
+                PassMode::Indirect { extra_attrs: Some(_), .. } => {
+                    unimplemented!();
+                }
+                PassMode::Cast(ref cast, pad_i32) => {
+                    // add padding
+                    if pad_i32 {
+                        argument_tys.push(Reg::i32().gcc_type(cx));
+                    }
+                    cast.gcc_type(cx)
+                }
+                PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+                    on_stack_param_indices.insert(argument_tys.len());
+                    arg.memory_ty(cx)
+                },
+                PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+            };
+            argument_tys.push(arg_ty);
+        }
+
+        (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
+    }
+
+    fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
+        let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
+        cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
+        pointer_type
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
new file mode 100644
index 00000000000..4bad33ee879
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -0,0 +1,126 @@
+#[cfg(feature="master")]
+use gccjit::FnAttribute;
+use gccjit::{FunctionType, GlobalKind, ToRValue};
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::OomStrategy;
+use rustc_span::symbol::sym;
+
+use crate::GccContext;
+
+pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) {
+    let context = &mods.context;
+    let usize =
+        match tcx.sess.target.pointer_width {
+            16 => context.new_type::<u16>(),
+            32 => context.new_type::<u32>(),
+            64 => context.new_type::<u64>(),
+            tws => bug!("Unsupported target word size for int: {}", tws),
+        };
+    let i8 = context.new_type::<i8>();
+    let i8p = i8.make_pointer();
+    let void = context.new_type::<()>();
+
+    for method in ALLOCATOR_METHODS {
+        let mut types = Vec::with_capacity(method.inputs.len());
+        for ty in method.inputs.iter() {
+            match *ty {
+                AllocatorTy::Layout => {
+                    types.push(usize);
+                    types.push(usize);
+                }
+                AllocatorTy::Ptr => types.push(i8p),
+                AllocatorTy::Usize => types.push(usize),
+
+                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+            }
+        }
+        let output = match method.output {
+            AllocatorTy::ResultPtr => Some(i8p),
+            AllocatorTy::Unit => None,
+
+            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+                panic!("invalid allocator output")
+            }
+        };
+        let name = format!("__rust_{}", method.name);
+
+        let args: Vec<_> = types.iter().enumerate()
+            .map(|(index, typ)|
context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false); + + if tcx.sess.target.options.default_hidden_visibility { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + if tcx.sess.must_emit_unwind_tables() { + // TODO(antoyo): emit unwind tables. + } + + let callee = kind.fn_name(method.name); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false); + #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + + let block = func.new_block("entry"); + + let args = args + .iter() + .enumerate() + .map(|(i, _)| func.get_param(i as i32).to_rvalue()) + .collect::<Vec<_>>(); + let ret = context.new_call(None, callee, &args); + //llvm::LLVMSetTailCall(ret, True); + if output.is_some() { + block.end_with_return(None, ret); + } + else { + block.end_with_void_return(None); + } + + // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances + // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 + } + + let types = [usize, usize]; + let name = "__rust_alloc_error_handler".to_string(); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); + + if tcx.sess.target.default_hidden_visibility { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + + let callee = alloc_error_handler_kind.fn_name(sym::oom); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); + #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + + let block = func.new_block("entry"); + + let args = args + .iter() + .enumerate() + .map(|(i, _)| func.get_param(i as i32).to_rvalue()) + .collect::<Vec<_>>(); + let _ret = context.new_call(None, callee, &args); + //llvm::LLVMSetTailCall(ret, True); + block.end_with_void_return(None); + + let name = OomStrategy::SYMBOL.to_string(); + let global = context.new_global(None, GlobalKind::Exported, i8, name); + let value = tcx.sess.opts.unstable_opts.oom.should_panic(); + let value = context.new_rvalue_from_int(i8, value as i32); + global.global_set_initializer_rvalue(value); +} diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs new file mode 100644 index 00000000000..11fa074f5ac --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/archive.rs @@ -0,0 +1,27 @@ +use std::path::{Path, PathBuf}; + +use rustc_codegen_ssa::back::archive::{ + get_native_object_symbols, ArArchiveBuilder, ArchiveBuilder, ArchiveBuilderBuilder, +}; +use rustc_session::Session; + +use rustc_session::cstore::DllImport; + +pub(crate) struct ArArchiveBuilderBuilder; + +impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder { + fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> { + 
Box::new(ArArchiveBuilder::new(sess, get_native_object_symbols)) + } + + fn create_dll_import_lib( + &self, + _sess: &Session, + _lib_name: &str, + _dll_imports: &[DllImport], + _tmpdir: &Path, + _is_direct_dependency: bool, + ) -> PathBuf { + unimplemented!("creating dll imports is not yet supported"); + } +} diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs new file mode 100644 index 00000000000..41e9d61a10e --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/asm.rs @@ -0,0 +1,857 @@ +use gccjit::{LValue, RValue, ToRValue, Type}; +use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece}; +use rustc_codegen_ssa::mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef}; + +use rustc_middle::{bug, ty::Instance}; +use rustc_span::Span; +use rustc_target::asm::*; + +use std::borrow::Cow; + +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::errors::UnwindingInlineAsm; +use crate::type_of::LayoutGccExt; +use crate::callee::get_fn; + + +// Rust asm! and GCC Extended Asm semantics differ substantially. +// +// 1. Rust asm operands go along as one list of operands. Operands themselves indicate +// if they're "in" or "out". "In" and "out" operands can interleave. One operand can be +// both "in" and "out" (`inout(reg)`). +// +// GCC asm has two different lists for "in" and "out" operands. In terms of gccjit, +// this means that all "out" operands must go before "in" operands. "In" and "out" operands +// cannot interleave. +// +// 2. Operand lists in both Rust and GCC are indexed. Index starts from 0. Indexes are important +// because the asm template refers to operands by index. +// +// Mapping from Rust to GCC index would be 1-1 if it wasn't for... +// +// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes. +// Contrary, Rust expresses clobbers through "out" operands that aren't tied to +// a variable (`_`), and such "clobbers" do have index. +// +// 4. Furthermore, GCC Extended Asm does not support explicit register constraints +// (like `out("eax")`) directly, offering so-called "local register variables" +// as a workaround. These variables need to be declared and initialized *before* +// the Extended Asm block but *after* normal local variables +// (see comment in `codegen_inline_asm` for explanation). 
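+// (A "local register variable" is, in GCC C terms, a declaration such as
+// `register long tmp asm("rax");` placed right before the asm statement;
+// gccjit exposes the same mechanism through `set_register_name` on a local,
+// which is what the code below uses.)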
+//
+// With that in mind, let's see how we translate Rust syntax to GCC
+// (from now on, `CC` stands for "constraint code"):
+//
+// * `out(reg_class) var` -> translated to output operand: `"=CC"(var)`
+// * `inout(reg_class) var` -> translated to output operand: `"+CC"(var)`
+// * `in(reg_class) var` -> translated to input operand: `"CC"(var)`
+//
+// * `out(reg_class) _` -> translated to one `=r(tmp)`, where "tmp" is a temporary unused variable
+//
+// * `out("explicit register") _` -> not translated to any operands; the register is simply added to the clobbers list
+//
+// * `inout(reg_class) in_var => out_var` -> translated to two operands:
+//                  output: `"=CC"(out_var)`
+//                  input:  `"num"(in_var)` where num is the GCC index
+//                          of the corresponding output operand
+//
+// * `inout(reg_class) in_var => _` -> same as `inout(reg_class) in_var => tmp`,
+//                  where "tmp" is a temporary unused variable
+//
+// * `out/in/inout("explicit register") var` -> translated to one or two operands as described above
+//                  with a `"r"(var)` constraint,
+//                  and one register variable assigned to the desired register.
+
+const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
+const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
+
+
+struct AsmOutOperand<'a, 'tcx, 'gcc> {
+    rust_idx: usize,
+    constraint: &'a str,
+    late: bool,
+    readwrite: bool,
+
+    tmp_var: LValue<'gcc>,
+    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
+}
+
+struct AsmInOperand<'a, 'tcx> {
+    rust_idx: usize,
+    constraint: Cow<'a, str>,
+    val: RValue<'tcx>
+}
+
+impl AsmOutOperand<'_, '_, '_> {
+    fn to_constraint(&self) -> String {
+        let mut res = String::with_capacity(self.constraint.len() + self.late as usize + 1);
+
+        let sign = if self.readwrite { '+' } else { '=' };
+        res.push(sign);
+        if !self.late {
+            res.push('&');
+        }
+
+        res.push_str(&self.constraint);
+        res
+    }
+}
+
+enum ConstraintOrRegister {
+    Constraint(&'static str),
+    Register(&'static str)
+}
+
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+        if options.contains(InlineAsmOptions::MAY_UNWIND) {
+            self.sess()
+                .create_err(UnwindingInlineAsm { span: span[0] })
+                .emit();
+            return;
+        }
+
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+        let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+        let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+        // GCC index of an output operand equals its position in the array
+        let mut outputs = vec![];
+
+        // GCC index of an input operand equals its position in the array
+        // added to `outputs.len()`
+        let mut inputs = vec![];
+
+        // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _`
+        let mut clobbers = vec![];
+
+        // We're trying to preallocate space for the template
+        let mut constants_len = 0;
+
+        // There are rules we must adhere to if we want GCC to do the right thing:
+        //
+        // * Every local variable that the asm block uses as an output must be declared *before*
+        //   the asm block.
+        // * There must be no instructions whatsoever between the register variables and the asm.
+        //
+        // Therefore, the backend must generate the instructions strictly in this order:
+        //
+        // 1. Output variables.
+        // 2. Register variables.
+        // 3. The asm block.
+        //
+        // We also must make sure that no input operands are emitted before output operands.
+        //
+        // This is why we work in passes, first emitting local vars, then local register vars.
+        // Also, we don't emit any asm operands immediately; we save them to
+        // one of the buffers to be emitted later.
+
+        // 1. Normal variables (and saving operands to buffers).
+        for (rust_idx, op) in rust_operands.iter().enumerate() {
+            match *op {
+                InlineAsmOperandRef::Out { reg, late, place } => {
+                    use ConstraintOrRegister::*;
+
+                    let (constraint, ty) = match (reg_to_gcc(reg), place) {
+                        (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx)),
+                        // When `reg` is a class and not an explicit register but the out place is not specified,
+                        // we need to create an unused output variable to assign the output to. This var
+                        // needs to be of a type that's "compatible" with the register class, but the specific
+                        // type doesn't matter.
+                        (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())),
+                        (Register(_), Some(_)) => {
+                            // left for the next pass
+                            continue
+                        },
+                        (Register(reg_name), None) => {
+                            // `clobber_abi` can add lots of clobbers that are not supported by the target,
+                            // such as AVX-512 registers, so we just ignore unsupported registers
+                            let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
+                                .any(|&(_, feature)| {
+                                    if let Some(feature) = feature {
+                                        self.tcx.sess.target_features.contains(&feature)
+                                    } else {
+                                        true // Register class is unconditionally supported
+                                    }
+                                });
+
+                            if is_target_supported && !clobbers.contains(&reg_name) {
+                                clobbers.push(reg_name);
+                            }
+                            continue
+                        }
+                    };
+
+                    let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                    outputs.push(AsmOutOperand {
+                        constraint,
+                        rust_idx,
+                        late,
+                        readwrite: false,
+                        tmp_var,
+                        out_place: place
+                    });
+                }
+
+                InlineAsmOperandRef::In { reg, value } => {
+                    if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+                        inputs.push(AsmInOperand {
+                            constraint: Cow::Borrowed(constraint),
+                            rust_idx,
+                            val: value.immediate()
+                        });
+                    }
+                    else {
+                        // left for the next pass
+                        continue
+                    }
+                }
+
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+                    let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+                        constraint
+                    }
+                    else {
+                        // left for the next pass
+                        continue
+                    };
+
+                    // The rustc frontend guarantees that input and output types are "compatible",
+                    // so we can just use the input var's type for the output variable.
+                    //
+                    // This decision is also backed by the fact that LLVM needs in and out
+                    // values to be of *exactly the same type*, not just "compatible".
+                    // I'm not sure whether GCC is as picky, but better safe than sorry.
+                    let ty = in_value.layout.gcc_type(self.cx);
+                    let tmp_var = self.current_func().new_local(None, ty, "output_register");
+
+                    // If the out_place is None (i.e. the `inout(reg) _` syntax was used), we translate
+                    // it to one "readwrite (+) output variable", otherwise we translate it to two
+                    // "out and tied in" vars as described above.
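+                    // For example, `inout(reg) x` becomes a single `"+r"` output operand, while
+                    // `inout(reg) x => y` becomes a `"=r"` output plus an input whose constraint
+                    // is the decimal index of that output (a GCC "matching constraint").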
+ let readwrite = out_place.is_none(); + outputs.push(AsmOutOperand { + constraint, + rust_idx, + late, + readwrite, + tmp_var, + out_place, + }); + + if !readwrite { + let out_gcc_idx = outputs.len() - 1; + let constraint = Cow::Owned(out_gcc_idx.to_string()); + + inputs.push(AsmInOperand { + constraint, + rust_idx, + val: in_value.immediate() + }); + } + } + + InlineAsmOperandRef::Const { ref string } => { + constants_len += string.len() + att_dialect as usize; + } + + InlineAsmOperandRef::SymFn { instance } => { + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O) + // or byte count suffixes (x86 Windows). + constants_len += self.tcx.symbol_name(instance).name.len(); + } + InlineAsmOperandRef::SymStatic { def_id } => { + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O). + constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len(); + } + } + } + + // 2. Register variables. + for (rust_idx, op) in rust_operands.iter().enumerate() { + match *op { + // `out("explicit register") var` + InlineAsmOperandRef::Out { reg, late, place } => { + if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { + let out_place = if let Some(place) = place { + place + } + else { + // processed in the previous pass + continue + }; + + let ty = out_place.layout.gcc_type(self.cx); + let tmp_var = self.current_func().new_local(None, ty, "output_register"); + tmp_var.set_register_name(reg_name); + + outputs.push(AsmOutOperand { + constraint: "r".into(), + rust_idx, + late, + readwrite: false, + tmp_var, + out_place: Some(out_place) + }); + } + + // processed in the previous pass + } + + // `in("explicit register") var` + InlineAsmOperandRef::In { reg, value } => { + if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { + let ty = value.layout.gcc_type(self.cx); + let reg_var = self.current_func().new_local(None, ty, "input_register"); + reg_var.set_register_name(reg_name); + self.llbb().add_assignment(None, reg_var, value.immediate()); + + inputs.push(AsmInOperand { + constraint: "r".into(), + rust_idx, + val: reg_var.to_rvalue() + }); + } + + // processed in the previous pass + } + + // `inout("explicit register") in_var => out_var` + InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { + if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { + // See explanation in the first pass. + let ty = in_value.layout.gcc_type(self.cx); + let tmp_var = self.current_func().new_local(None, ty, "output_register"); + tmp_var.set_register_name(reg_name); + + outputs.push(AsmOutOperand { + constraint: "r".into(), + rust_idx, + late, + readwrite: false, + tmp_var, + out_place, + }); + + let constraint = Cow::Owned((outputs.len() - 1).to_string()); + inputs.push(AsmInOperand { + constraint, + rust_idx, + val: in_value.immediate() + }); + } + + // processed in the previous pass + } + + InlineAsmOperandRef::SymFn { instance } => { + inputs.push(AsmInOperand { + constraint: "X".into(), + rust_idx, + val: get_fn(self.cx, instance).get_address(None), + }); + } + + InlineAsmOperandRef::SymStatic { def_id } => { + inputs.push(AsmInOperand { + constraint: "X".into(), + rust_idx, + val: self.cx.get_static(def_id).get_address(None), + }); + } + + InlineAsmOperandRef::Const { .. } => { + // processed in the previous pass + } + } + } + + // 3. 
Build the template string + + let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect)); + if att_dialect { + template_str.push_str(ATT_SYNTAX_INS); + } + + for piece in template { + match *piece { + InlineAsmTemplatePiece::String(ref string) => { + for char in string.chars() { + // TODO(antoyo): might also need to escape | if rustc doesn't do it. + let escaped_char = + match char { + '%' => "%%", + '{' => "%{", + '}' => "%}", + _ => { + template_str.push(char); + continue; + }, + }; + template_str.push_str(escaped_char); + } + } + InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => { + let mut push_to_template = |modifier, gcc_idx| { + use std::fmt::Write; + + template_str.push('%'); + if let Some(modifier) = modifier { + template_str.push(modifier); + } + write!(template_str, "{}", gcc_idx).expect("pushing to string failed"); + }; + + match rust_operands[operand_idx] { + InlineAsmOperandRef::Out { reg, .. } => { + let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); + let gcc_index = outputs.iter() + .position(|op| operand_idx == op.rust_idx) + .expect("wrong rust index"); + push_to_template(modifier, gcc_index); + } + + InlineAsmOperandRef::In { reg, .. } => { + let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); + let in_gcc_index = inputs.iter() + .position(|op| operand_idx == op.rust_idx) + .expect("wrong rust index"); + let gcc_index = in_gcc_index + outputs.len(); + push_to_template(modifier, gcc_index); + } + + InlineAsmOperandRef::InOut { reg, .. } => { + let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); + + // The input register is tied to the output, so we can just use the index of the output register + let gcc_index = outputs.iter() + .position(|op| operand_idx == op.rust_idx) + .expect("wrong rust index"); + push_to_template(modifier, gcc_index); + } + + InlineAsmOperandRef::SymFn { instance } => { + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O) + // or byte count suffixes (x86 Windows). + let name = self.tcx.symbol_name(instance).name; + template_str.push_str(name); + } + + InlineAsmOperandRef::SymStatic { def_id } => { + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O). + let instance = Instance::mono(self.tcx, def_id); + let name = self.tcx.symbol_name(instance).name; + template_str.push_str(name); + } + + InlineAsmOperandRef::Const { ref string } => { + // Const operands get injected directly into the template + if att_dialect { + template_str.push('$'); + } + template_str.push_str(string); + } + } + } + } + } + + if att_dialect { + template_str.push_str(INTEL_SYNTAX_INS); + } + + // 4. Generate Extended Asm block + + let block = self.llbb(); + let extended_asm = block.add_extended_asm(None, &template_str); + + for op in &outputs { + extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var); + } + + for op in &inputs { + extended_asm.add_input_operand(None, &op.constraint, op.val); + } + + for clobber in clobbers.iter() { + extended_asm.add_clobber(clobber); + } + + if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) { + // TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient + // on all architectures. For instance, what about FP stack? 
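+        // ("cc" is GCC's name for the condition-code/flags clobber; it does not
+        // cover the x87 FP stack, so it may indeed be insufficient there.)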
+            extended_asm.add_clobber("cc");
+        }
+        if !options.contains(InlineAsmOptions::NOMEM) {
+            extended_asm.add_clobber("memory");
+        }
+        if !options.contains(InlineAsmOptions::PURE) {
+            extended_asm.set_volatile_flag(true);
+        }
+        if !options.contains(InlineAsmOptions::NOSTACK) {
+            // TODO(@Commeownist): figure out how to align stack
+        }
+        if options.contains(InlineAsmOptions::NORETURN) {
+            let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
+            let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
+            self.call(self.type_void(), None, builtin_unreachable, &[], None);
+        }
+
+        // Write results to outputs.
+        //
+        // We need to do this because:
+        // 1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases
+        //    (especially with the current `rustc_backend_ssa` API).
+        // 2. Not every output operand has an `out_place`, while `add_output_operand` requires one.
+        //
+        // Instead, we generate a temporary output variable for each output operand, and then this loop
+        // generates `out_place = tmp_var;` assignments where an `out_place` exists.
+        for op in &outputs {
+            if let Some(place) = op.out_place {
+                OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);
+            }
+        }
+
+    }
+}
+
+fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
+    let len: usize = template.iter().map(|piece| {
+        match *piece {
+            InlineAsmTemplatePiece::String(ref string) => {
+                string.len()
+            }
+            InlineAsmTemplatePiece::Placeholder { .. } => {
+                // '%' + 1 char modifier + 1 char index
+                3
+            }
+        }
+    })
+    .sum();
+
+    // Increase it by 5% to account for possible '%' signs that will be duplicated.
+    // I pulled the number out of the blue, but it should be fair enough
+    // as an upper bound.
+    let mut res = (len as f32 * 1.05) as usize + constants_len;
+
+    if att_dialect {
+        res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len();
+    }
+    res
+}
+
+/// Converts a register class to a GCC constraint code.
+fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+    let constraint = match reg {
+        // For vector registers LLVM wants the register name to match the type size.
+        InlineAsmRegOrRegClass::Reg(reg) => {
+            match reg {
+                InlineAsmReg::X86(_) => {
+                    // TODO(antoyo): add support for vector registers.
+                    //
+                    // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+                    return ConstraintOrRegister::Register(match reg.name() {
+                        // Some register names do not map 1:1 from Rust to GCC.
+                        "st(0)" => "st",
+
+                        name => name,
+                    });
+                }
+
+                _ => unimplemented!(),
+            }
+        },
+        // The constraint codes can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+                unreachable!("clobber-only")
+            }
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
+            // https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for
+            // "define_constraint".
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h", + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r", + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l", + + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + unreachable!("clobber-only") + }, + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r", + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q", + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q", + InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x", + InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v", + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk", + InlineAsmRegClass::X86( + X86InlineAsmRegClass::kreg0 + | X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::tmm_reg, + ) => unreachable!("clobber-only"), + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("GCC backend does not support SPIR-V") + } + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::Err => unreachable!(), + } + }; + + ConstraintOrRegister::Constraint(constraint) +} + +/// Type to use for outputs that are discarded. It doesn't really matter what +/// the type is, as long as it is valid for the constraint code. 
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> { + match reg { + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) + | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { + unimplemented!() + } + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)=> cx.type_i32(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { + unimplemented!() + } + InlineAsmRegClass::Avr(_) => unimplemented!(), + InlineAsmRegClass::Bpf(_) => unimplemented!(), + InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), + InlineAsmRegClass::Msp430(_) => unimplemented!(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + unreachable!("clobber-only") + }, + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => cx.type_i16(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::tmm_reg) => unimplemented!(), + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(), + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("LLVM backend does not support SPIR-V") + }, + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::Err => unreachable!(), + } +} + +impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: 
&[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) { + let asm_arch = self.tcx.sess.asm_arch.unwrap(); + + // Default to Intel syntax on x86 + let att_dialect = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) + && options.contains(InlineAsmOptions::ATT_SYNTAX); + + // Build the template string + let mut template_str = ".pushsection .text\n".to_owned(); + if att_dialect { + template_str.push_str(".att_syntax\n"); + } + for piece in template { + match *piece { + InlineAsmTemplatePiece::String(ref string) => { + let mut index = 0; + while index < string.len() { + // NOTE: gcc does not allow inline comment, so remove them. + let comment_index = string[index..].find("//") + .map(|comment_index| comment_index + index) + .unwrap_or(string.len()); + template_str.push_str(&string[index..comment_index]); + index = string[comment_index..].find('\n') + .map(|index| index + comment_index) + .unwrap_or(string.len()); + } + }, + InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => { + match operands[operand_idx] { + GlobalAsmOperandRef::Const { ref string } => { + // Const operands get injected directly into the + // template. Note that we don't need to escape % + // here unlike normal inline assembly. + template_str.push_str(string); + } + + GlobalAsmOperandRef::SymFn { instance } => { + let function = get_fn(self, instance); + self.add_used_function(function); + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O) + // or byte count suffixes (x86 Windows). + let name = self.tcx.symbol_name(instance).name; + template_str.push_str(name); + } + + GlobalAsmOperandRef::SymStatic { def_id } => { + // TODO(antoyo): set the global variable as used. + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O). + let instance = Instance::mono(self.tcx, def_id); + let name = self.tcx.symbol_name(instance).name; + template_str.push_str(name); + } + } + } + } + } + + if att_dialect { + template_str.push_str("\n\t.intel_syntax noprefix"); + } + // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually. 
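+        // The final template is thus wrapped as ".pushsection .text\n<asm>\n.popsection",
+        // so whatever section was in effect before the asm is restored afterwards.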
+ template_str.push_str("\n.popsection"); + self.context.add_top_level_asm(None, &template_str); + } +} + +fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> { + // The modifiers can be retrieved from + // https://gcc.gnu.org/onlinedocs/gcc/Modifiers.html#Modifiers + match reg { + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier, + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) + | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { + if modifier == Some('v') { None } else { modifier } + } + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None, + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None, + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { + if modifier.is_none() { + Some('q') + } else { + modifier + } + } + InlineAsmRegClass::Hexagon(_) => None, + InlineAsmRegClass::Mips(_) => None, + InlineAsmRegClass::Nvptx(_) => None, + InlineAsmRegClass::PowerPC(_) => None, + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) + | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None, + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier { + None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') }, + Some('l') => Some('b'), + Some('h') => Some('h'), + Some('x') => Some('w'), + Some('e') => Some('k'), + Some('r') => Some('q'), + _ => unreachable!(), + }, + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None, + InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg) + | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg) + | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) { + (X86InlineAsmRegClass::xmm_reg, None) => Some('x'), + (X86InlineAsmRegClass::ymm_reg, None) => Some('t'), + (X86InlineAsmRegClass::zmm_reg, None) => Some('g'), + (_, Some('x')) => Some('x'), + (_, Some('y')) => Some('t'), + (_, Some('z')) => Some('g'), + _ => unreachable!(), + }, + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None, + InlineAsmRegClass::X86( + X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::kreg0 + | X86InlineAsmRegClass::tmm_reg, + ) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None, + InlineAsmRegClass::Bpf(_) => None, + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) + | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) + | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier { + Some('h') => Some('B'), + Some('l') => Some('A'), + _ => None, + }, + InlineAsmRegClass::Avr(_) => None, + InlineAsmRegClass::S390x(_) => None, + InlineAsmRegClass::Msp430(_) => None, + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("LLVM backend does not support SPIR-V") + } + InlineAsmRegClass::Err => unreachable!(), + } +} diff --git 
a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs new file mode 100644 index 00000000000..243a1a36dd0 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/attributes.rs @@ -0,0 +1,112 @@ +#[cfg(feature="master")] +use gccjit::FnAttribute; +use gccjit::Function; +use rustc_attr::InstructionSetAttr; +use rustc_codegen_ssa::target_features::tied_target_features; +use rustc_data_structures::fx::FxHashMap; +use rustc_middle::ty; +use rustc_session::Session; +use rustc_span::symbol::sym; +use smallvec::{smallvec, SmallVec}; + +use crate::context::CodegenCx; + +// Given a map from target_features to whether they are enabled or disabled, +// ensure only valid combinations are allowed. +pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> { + for tied in tied_target_features(sess) { + // Tied features must be set to the same value, or not set at all + let mut tied_iter = tied.iter(); + let enabled = features.get(tied_iter.next().unwrap()); + if tied_iter.any(|feature| enabled != features.get(feature)) { + return Some(tied); + } + } + None +} + +// TODO(antoyo): maybe move to a new module gcc_util. +// To find a list of GCC's names, check https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> { + let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch }; + match (arch, s) { + ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"], + ("x86", "pclmulqdq") => smallvec!["pclmul"], + ("x86", "rdrand") => smallvec!["rdrnd"], + ("x86", "bmi1") => smallvec!["bmi"], + ("x86", "cmpxchg16b") => smallvec!["cx16"], + ("x86", "avx512vaes") => smallvec!["vaes"], + ("x86", "avx512gfni") => smallvec!["gfni"], + ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"], + // NOTE: seems like GCC requires 'avx512bw' for 'avx512vbmi2'. + ("x86", "avx512vbmi2") => smallvec!["avx512vbmi2", "avx512bw"], + // NOTE: seems like GCC requires 'avx512bw' for 'avx512bitalg'. + ("x86", "avx512bitalg") => smallvec!["avx512bitalg", "avx512bw"], + ("aarch64", "rcpc2") => smallvec!["rcpc-immo"], + ("aarch64", "dpb") => smallvec!["ccpp"], + ("aarch64", "dpb2") => smallvec!["ccdp"], + ("aarch64", "frintts") => smallvec!["fptoint"], + ("aarch64", "fcma") => smallvec!["complxnum"], + ("aarch64", "pmuv3") => smallvec!["perfmon"], + ("aarch64", "paca") => smallvec!["pauth"], + ("aarch64", "pacg") => smallvec!["pauth"], + // Rust ties fp and neon together. In LLVM neon implicitly enables fp, + // but we manually enable neon when a feature only implicitly enables fp + ("aarch64", "f32mm") => smallvec!["f32mm", "neon"], + ("aarch64", "f64mm") => smallvec!["f64mm", "neon"], + ("aarch64", "fhm") => smallvec!["fp16fml", "neon"], + ("aarch64", "fp16") => smallvec!["fullfp16", "neon"], + ("aarch64", "jsconv") => smallvec!["jsconv", "neon"], + ("aarch64", "sve") => smallvec!["sve", "neon"], + ("aarch64", "sve2") => smallvec!["sve2", "neon"], + ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"], + ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"], + ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"], + ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"], + (_, s) => smallvec![s], + } +} + +/// Composite function which sets GCC attributes for function depending on its AST (`#[attribute]`) +/// attributes. 
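+/// For example, a function annotated with `#[target_feature(enable = "avx2")]` ends up
+/// carrying a GCC `target("avx2")` attribute, added below via `FnAttribute::Target`
+/// (only when the `master` feature of gccjit is enabled).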
+pub fn from_fn_attrs<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + #[cfg_attr(not(feature="master"), allow(unused_variables))] + func: Function<'gcc>, + instance: ty::Instance<'tcx>, +) { + let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); + + let function_features = + codegen_fn_attrs.target_features.iter().map(|features| features.as_str()).collect::<Vec<&str>>(); + + if let Some(features) = check_tied_features(cx.tcx.sess, &function_features.iter().map(|features| (*features, true)).collect()) { + let span = cx.tcx + .get_attr(instance.def_id(), sym::target_feature) + .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span); + let msg = format!("the target features {} must all be either enabled or disabled together", features.join(", ")); + let mut err = cx.tcx.sess.struct_span_err(span, &msg); + err.help("add the missing features in a `target_feature` attribute"); + err.emit(); + return; + } + + let mut function_features = function_features + .iter() + .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter()) + .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x { + InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature. + InstructionSetAttr::ArmT32 => "thumb-mode", + })) + .collect::<Vec<_>>(); + + // TODO(antoyo): check if we really need global backend features. (Maybe they could be applied + // globally?) + let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str()); + function_features.extend(&mut global_features); + let target_features = function_features.join(","); + if !target_features.is_empty() { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Target(&target_features)); + } +} diff --git a/compiler/rustc_codegen_gcc/src/back/mod.rs b/compiler/rustc_codegen_gcc/src/back/mod.rs new file mode 100644 index 00000000000..d692799d764 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/back/mod.rs @@ -0,0 +1 @@ +pub mod write; diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs new file mode 100644 index 00000000000..5f54ac4ebc6 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/back/write.rs @@ -0,0 +1,84 @@ +use std::{env, fs}; + +use gccjit::OutputKind; +use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; +use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig}; +use rustc_errors::Handler; +use rustc_session::config::OutputType; +use rustc_span::fatal_error::FatalError; +use rustc_target::spec::SplitDebuginfo; + +use crate::{GccCodegenBackend, GccContext}; + +pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> { + let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name); + { + let context = &module.module_llvm.context; + + let module_name = module.name.clone(); + let module_name = Some(&module_name[..]); + + let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); + let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); + + if config.bitcode_needed() { + // TODO(antoyo) + } + + if config.emit_ir { + unimplemented!(); + } + + if config.emit_asm { + let _timer = cgcx + .prof + .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name); + let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); + context.compile_to_file(OutputKind::Assembler, 
path.to_str().expect("path to str")); + } + + match config.emit_obj { + EmitObj::ObjectCode(_) => { + let _timer = cgcx + .prof + .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name); + if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") { + println!("Module {}", module.name); + } + if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) { + println!("Dumping reproducer {}", module.name); + let _ = fs::create_dir("/tmp/reproducers"); + // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by + // transmuting an rvalue to an lvalue. + // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue + context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name)); + println!("Dumped reproducer {}", module.name); + } + if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") { + let _ = fs::create_dir("/tmp/gccjit_dumps"); + let path = &format!("/tmp/gccjit_dumps/{}.c", module.name); + context.set_debug_info(true); + context.dump_to_file(path, true); + } + context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str")); + } + + EmitObj::Bitcode => { + // TODO(antoyo) + } + + EmitObj::None => {} + } + } + + Ok(module.into_compiled_module( + config.emit_obj != EmitObj::None, + cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked, + config.emit_bc, + &cgcx.output_filenames, + )) +} + +pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> { + unimplemented!(); +} diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs new file mode 100644 index 00000000000..dcd560b3dcd --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/base.rs @@ -0,0 +1,191 @@ +use std::env; +use std::time::Instant; + +use gccjit::{ + Context, + FunctionType, + GlobalKind, +}; +use rustc_middle::dep_graph; +use rustc_middle::ty::TyCtxt; +#[cfg(feature="master")] +use rustc_middle::mir::mono::Visibility; +use rustc_middle::mir::mono::Linkage; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::base::maybe_create_entry_wrapper; +use rustc_codegen_ssa::mono_item::MonoItemExt; +use rustc_codegen_ssa::traits::DebugInfoMethods; +use rustc_session::config::DebugInfo; +use rustc_span::Symbol; + +use crate::GccContext; +use crate::builder::Builder; +use crate::context::CodegenCx; + +#[cfg(feature="master")] +pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility { + match linkage { + Visibility::Default => gccjit::Visibility::Default, + Visibility::Hidden => gccjit::Visibility::Hidden, + Visibility::Protected => gccjit::Visibility::Protected, + } +} + +pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { + match linkage { + Linkage::External => GlobalKind::Imported, + Linkage::AvailableExternally => GlobalKind::Imported, + Linkage::LinkOnceAny => unimplemented!(), + Linkage::LinkOnceODR => unimplemented!(), + Linkage::WeakAny => unimplemented!(), + Linkage::WeakODR => unimplemented!(), + Linkage::Appending => unimplemented!(), + Linkage::Internal => GlobalKind::Internal, + Linkage::Private => GlobalKind::Internal, + Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage. 
+ Linkage::Common => unimplemented!(), + } +} + +pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType { + match linkage { + Linkage::External => FunctionType::Exported, + Linkage::AvailableExternally => FunctionType::Extern, + Linkage::LinkOnceAny => unimplemented!(), + Linkage::LinkOnceODR => unimplemented!(), + Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce. + Linkage::WeakODR => unimplemented!(), + Linkage::Appending => unimplemented!(), + Linkage::Internal => FunctionType::Internal, + Linkage::Private => FunctionType::Internal, + Linkage::ExternalWeak => unimplemented!(), + Linkage::Common => unimplemented!(), + } +} + +pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) { + let prof_timer = tcx.prof.generic_activity("codegen_module"); + let start_time = Instant::now(); + + let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx); + let (module, _) = tcx.dep_graph.with_task( + dep_node, + tcx, + (cgu_name, supports_128bit_integers), + module_codegen, + Some(dep_graph::hash_result), + ); + let time_to_codegen = start_time.elapsed(); + drop(prof_timer); + + // We assume that the cost to run GCC on a CGU is proportional to + // the time we needed for codegenning it. + let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64; + + fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> { + let cgu = tcx.codegen_unit(cgu_name); + // Instantiate monomorphizations without filling out definitions yet... + //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str()); + let context = Context::default(); + + context.add_command_line_option("-fexceptions"); + context.add_driver_option("-fexceptions"); + + // TODO(antoyo): only set on x86 platforms. + context.add_command_line_option("-masm=intel"); + // TODO(antoyo): only add the following cli argument if the feature is supported. + context.add_command_line_option("-msse2"); + context.add_command_line_option("-mavx2"); + // FIXME(antoyo): the following causes an illegal instruction on vmovdqu64 in std_example on my CPU. + // Only add if the CPU supports it. + context.add_command_line_option("-msha"); + context.add_command_line_option("-mpclmul"); + context.add_command_line_option("-mfma"); + context.add_command_line_option("-mfma4"); + context.add_command_line_option("-m64"); + context.add_command_line_option("-mbmi"); + context.add_command_line_option("-mgfni"); + //context.add_command_line_option("-mavxvnni"); // The CI doesn't support this option. + context.add_command_line_option("-mf16c"); + context.add_command_line_option("-maes"); + context.add_command_line_option("-mxsavec"); + context.add_command_line_option("-mbmi2"); + context.add_command_line_option("-mrtm"); + context.add_command_line_option("-mvaes"); + context.add_command_line_option("-mvpclmulqdq"); + context.add_command_line_option("-mavx"); + + for arg in &tcx.sess.opts.cg.llvm_args { + context.add_command_line_option(arg); + } + // NOTE: This is needed to compile the file src/intrinsic/archs.rs during a bootstrap of rustc. + context.add_command_line_option("-fno-var-tracking-assignments"); + // NOTE: an optimization (https://github.com/rust-lang/rustc_codegen_gcc/issues/53). + context.add_command_line_option("-fno-semantic-interposition"); + // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292). 
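+        // (-fno-strict-aliasing disables GCC's type-based alias analysis, matching that assumption.)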
+        context.add_command_line_option("-fno-strict-aliasing");
+        // NOTE: Rust relies on LLVM doing wrapping on overflow.
+        context.add_command_line_option("-fwrapv");
+
+        if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
+            context.add_command_line_option("-ffunction-sections");
+            context.add_command_line_option("-fdata-sections");
+        }
+
+        if env::var("CG_GCCJIT_DUMP_RTL").as_deref() == Ok("1") {
+            context.add_command_line_option("-fdump-rtl-vregs");
+        }
+        if env::var("CG_GCCJIT_DUMP_TREE_ALL").as_deref() == Ok("1") {
+            context.add_command_line_option("-fdump-tree-all");
+        }
+        if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+            context.set_dump_code_on_compile(true);
+        }
+        if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+            context.set_dump_initial_gimple(true);
+        }
+        context.set_debug_info(true);
+        if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+            context.set_dump_everything(true);
+        }
+        if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+            context.set_keep_intermediates(true);
+        }
+
+        // NOTE: The codegen generates unreachable blocks.
+        context.set_allow_unreachable_blocks(true);
+
+        {
+            let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
+
+            let mono_items = cgu.items_in_deterministic_order(tcx);
+            for &(mono_item, (linkage, visibility)) in &mono_items {
+                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+            }
+
+            // ... and now that we have everything pre-defined, fill out those definitions.
+            for &(mono_item, _) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            }
+
+            // If this codegen unit contains the main function, also create the
+            // wrapper here.
+            maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+            // Finalize debuginfo.
+            if cx.sess().opts.debuginfo != DebugInfo::None {
+                cx.debuginfo_finalize();
+            }
+        }
+
+        ModuleCodegen {
+            name: cgu_name.to_string(),
+            module_llvm: GccContext {
+                context
+            },
+            kind: ModuleKind::Regular,
+        }
+    }
+
+    (module, cost)
+}
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
new file mode 100644
index 00000000000..a3c8142bea2
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -0,0 +1,1994 @@
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::Deref;
+
+use gccjit::{
+    BinaryOp,
+    Block,
+    ComparisonOp,
+    Context,
+    Function,
+    LValue,
+    RValue,
+    ToRValue,
+    Type,
+    UnaryOp,
+};
+use rustc_apfloat::{ieee, Float, Round, Status};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{
+    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+    BackendTypes,
+    BaseTypeMethods,
+    BuilderMethods,
+    ConstMethods,
+    DerivedTypeMethods,
+    LayoutTypeMethods,
+    HasCodegen,
+    OverflowOp,
+    StaticBuilderMethods,
+};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::bug;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{
+    self,
+    call::FnAbi,
+    Align,
+    HasDataLayout,
+    Size,
+    TargetDataLayout,
+    WrappingRange,
+};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+use crate::type_of::LayoutGccExt;
+
+// TODO(antoyo)
+type Funclet = ();
+
+// TODO(antoyo): remove this variable.
+static mut RETURN_VALUE_COUNT: usize = 0;
+
+enum ExtremumOperation {
+    Max,
+    Min,
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+    pub cx: &'a CodegenCx<'gcc, 'tcx>,
+    pub block: Block<'gcc>,
+    stack_var_count: Cell<usize>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+        Builder {
+            cx,
+            block,
+            stack_var_count: Cell::new(0),
+        }
+    }
+
+    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = src.get_type().get_size();
+
+        let func = self.current_func();
+
+        let load_ordering =
+            match order {
+                // TODO(antoyo): does this make sense?
+                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+                _ => order,
+            };
+        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
+        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(None, previous_var, previous_value);
+        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
+        let while_block = func.new_block("while");
+        let after_block = func.new_block("after_while");
+        self.llbb().end_with_jump(None, while_block);
+
+        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
+        // state needs to be updated.
+        self.switch_to_block(while_block);
+
+        let comparison_operator =
+            match operation {
+                ExtremumOperation::Max => ComparisonOp::LessThan,
+                ExtremumOperation::Min => ComparisonOp::GreaterThan,
+            };
+
+        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+        while_block.end_with_conditional(None, cond, while_block, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+        // state needs to be updated.
+ self.switch_to_block(after_block); + + return_value.to_rvalue() + } + + fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { + let size = src.get_type().get_size(); + let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size)); + let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); + let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc()); + let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32); + + let void_ptr_type = self.context.new_type::<*mut ()>(); + let volatile_void_ptr_type = void_ptr_type.make_volatile(); + let dst = self.context.new_cast(None, dst, volatile_void_ptr_type); + let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type); + + // NOTE: not sure why, but we have the wrong type here. + let int_type = compare_exchange.get_param(2).to_rvalue().get_type(); + let src = self.context.new_cast(None, src, int_type); + self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order]) + } + + pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) { + self.llbb().add_assignment(None, lvalue, value); + } + + fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { + let mut all_args_match = true; + let mut param_types = vec![]; + let param_count = func.get_param_count(); + for (index, arg) in args.iter().enumerate().take(param_count) { + let param = func.get_param(index as i32); + let param = param.to_rvalue().get_type(); + if param != arg.get_type() { + all_args_match = false; + } + param_types.push(param); + } + + if all_args_match { + return Cow::Borrowed(args); + } + + let casted_args: Vec<_> = param_types + .into_iter() + .zip(args.iter()) + .enumerate() + .map(|(_i, (expected_ty, &actual_val))| { + let actual_ty = actual_val.get_type(); + if expected_ty != actual_ty { + self.bitcast(actual_val, expected_ty) + } + else { + actual_val + } + }) + .collect(); + + Cow::Owned(casted_args) + } + + fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { + let mut all_args_match = true; + let mut param_types = vec![]; + let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); + for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) { + let param = gcc_func.get_param_type(index); + if param != arg.get_type() { + all_args_match = false; + } + param_types.push(param); + } + + let mut on_stack_param_indices = FxHashSet::default(); + if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) { + on_stack_param_indices = indices.clone(); + } + + if all_args_match { + return Cow::Borrowed(args); + } + + let func_name = format!("{:?}", func_ptr); + + let casted_args: Vec<_> = param_types + .into_iter() + .zip(args.iter()) + .enumerate() + .map(|(index, (expected_ty, &actual_val))| { + if llvm::ignore_arg_cast(&func_name, index, args.len()) { + return actual_val; + } + + let actual_ty = actual_val.get_type(); + if expected_ty != actual_ty { + if !actual_ty.is_vector() && !expected_ty.is_vector() && (actual_ty.is_integral() && expected_ty.is_integral()) || (actual_ty.get_pointee().is_some() && expected_ty.get_pointee().is_some()) { + self.context.new_cast(None, actual_val, expected_ty) + } + else if 
on_stack_param_indices.contains(&index) {
+                        actual_val.dereference(None).to_rvalue()
+                    }
+                    else {
+                        assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
+                        // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+                        // TODO: remove bitcast now that vector types can be compared?
+                        self.bitcast(actual_val, expected_ty)
+                    }
+                }
+                else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        Cow::Owned(casted_args)
+    }
+
+    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+        let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
+        let stored_ty = self.cx.val_ty(val);
+        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+        if dest_ptr_ty == stored_ptr_ty {
+            ptr
+        }
+        else {
+            self.bitcast(ptr, stored_ptr_ty)
+        }
+    }
+
+    pub fn current_func(&self) -> Function<'gcc> {
+        self.block.get_function()
+    }
+
+    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // TODO(antoyo): remove when the API supports a different type for functions.
+        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
+        let args = self.check_call("call", func, args);
+
+        // gccjit requires the result of functions to be used, even when it's not used.
+        // That's why we assign the result to a local or call add_eval().
+        let return_type = func.get_return_type();
+        let void_type = self.context.new_type::<()>();
+        let current_func = self.block.get_function();
+        if return_type != void_type {
+            unsafe { RETURN_VALUE_COUNT += 1 };
+            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+            self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+            result.to_rvalue()
+        }
+        else {
+            self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
+            // Return a dummy value when there is no return value.
+            self.context.new_rvalue_from_long(self.isize_type, 0)
+        }
+    }
+
+    fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+        let func_name = format!("{:?}", func_ptr);
+        let previous_arg_count = args.len();
+        let orig_args = args;
+        let args = {
+            let function_address_names = self.function_address_names.borrow();
+            let original_function_name = function_address_names.get(&func_ptr);
+            llvm::adjust_intrinsic_arguments(&self, gcc_func, args.into(), &func_name, original_function_name)
+        };
+        let args_adjusted = args.len() != previous_arg_count;
+        let args = self.check_ptr_call("call", func_ptr, &*args);
+
+        // gccjit requires the result of functions to be used, even when it's not used.
+        // That's why we assign the result to a local or call add_eval().
+        let return_type = gcc_func.get_return_type();
+        let void_type = self.context.new_type::<()>();
+        let current_func = self.block.get_function();
+
+        if return_type != void_type {
+            unsafe { RETURN_VALUE_COUNT += 1 };
+            let return_value = self.cx.context.new_call_through_ptr(None, func_ptr, &args);
+            let return_value = llvm::adjust_intrinsic_return_value(&self, return_value, &func_name, &args, args_adjusted, orig_args);
+            let result = current_func.new_local(None, return_value.get_type(), &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+            self.block.add_assignment(None, result, return_value);
+            result.to_rvalue()
+        }
+        else {
+            #[cfg(not(feature="master"))]
+            if gcc_func.get_param_count() == 0 {
+                // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
+                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+            }
+            else {
+                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            }
+            #[cfg(feature="master")]
+            self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            // Return a dummy value when there is no return value.
+            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+            result.to_rvalue()
+        }
+    }
+
+    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // gccjit requires the result of functions to be used, even when it's not used.
+        // That's why we assign the result to a local.
+        let return_type = self.context.new_type::<bool>();
+        let current_func = self.block.get_function();
+        // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
+        unsafe { RETURN_VALUE_COUNT += 1 };
+        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+        result.to_rvalue()
+    }
+}
+
+impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
+    type CodegenCx = CodegenCx<'gcc, 'tcx>;
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.cx.tcx()
+    }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.cx.data_layout()
+    }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
+
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        self.cx.handle_layout_err(err, span, ty)
+    }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> !
{ + self.cx.handle_fn_abi_err(err, span, fn_abi_request) + } +} + +impl<'a, 'gcc, 'tcx> Deref for Builder<'a, 'gcc, 'tcx> { + type Target = CodegenCx<'gcc, 'tcx>; + + fn deref<'b>(&'b self) -> &'a Self::Target { + self.cx + } +} + +impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> { + type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value; + type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function; + type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock; + type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type; + type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet; + + type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope; + type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation; + type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable; +} + +impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { + fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> { + Builder::with_cx(cx, block) + } + + fn llbb(&self) -> Block<'gcc> { + self.block + } + + fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> { + let func = cx.rvalue_as_function(func); + func.new_block(name) + } + + fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> { + let func = self.current_func(); + func.new_block(name) + } + + fn switch_to_block(&mut self, block: Self::BasicBlock) { + self.block = block; + } + + fn ret_void(&mut self) { + self.llbb().end_with_void_return(None) + } + + fn ret(&mut self, value: RValue<'gcc>) { + let value = + if self.structs_as_pointer.borrow().contains(&value) { + // NOTE: hack to workaround a limitation of the rustc API: see comment on + // CodegenCx.structs_as_pointer + value.dereference(None).to_rvalue() + } + else { + value + }; + self.llbb().end_with_return(None, value); + } + + fn br(&mut self, dest: Block<'gcc>) { + self.llbb().end_with_jump(None, dest) + } + + fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) { + self.llbb().end_with_conditional(None, cond, then_block, else_block) + } + + fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) { + let mut gcc_cases = vec![]; + let typ = self.val_ty(value); + for (on_val, dest) in cases { + let on_val = self.const_uint_big(typ, on_val); + gcc_cases.push(self.context.new_case(on_val, on_val, dest)); + } + self.block.end_with_switch(None, value, default_block, &gcc_cases); + } + + #[cfg(feature="master")] + fn invoke(&mut self, typ: Type<'gcc>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + let try_block = self.current_func().new_block("try"); + + let current_block = self.block.clone(); + self.block = try_block; + let call = self.call(typ, None, func, args, None); // TODO(antoyo): use funclet here? 
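+        // NOTE: the call above is emitted into the fresh try_block; once the current block is
+        // restored below, try_block is attached as the body of a try/catch (or try/finally for
+        // cleanup blocks), so an exception unwinding out of the call lands in `catch`.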
+ self.block = current_block; + + let return_value = self.current_func() + .new_local(None, call.get_type(), "invokeResult"); + + try_block.add_assignment(None, return_value, call); + + try_block.end_with_jump(None, then); + + if self.cleanup_blocks.borrow().contains(&catch) { + self.block.add_try_finally(None, try_block, catch); + } + else { + self.block.add_try_catch(None, try_block, catch); + } + + self.block.end_with_jump(None, then); + + return_value.to_rvalue() + } + + #[cfg(not(feature="master"))] + fn invoke(&mut self, typ: Type<'gcc>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + let call_site = self.call(typ, None, func, args, None); + let condition = self.context.new_rvalue_from_int(self.bool_type, 1); + self.llbb().end_with_conditional(None, condition, then, catch); + if let Some(_fn_abi) = fn_abi { + // TODO(bjorn3): Apply function attributes + } + call_site + } + + fn unreachable(&mut self) { + let func = self.context.get_builtin_function("__builtin_unreachable"); + self.block.add_eval(None, self.context.new_call(None, func, &[])); + let return_type = self.block.get_function().get_return_type(); + let void_type = self.context.new_type::<()>(); + if return_type == void_type { + self.block.end_with_void_return(None) + } + else { + let return_value = self.current_func() + .new_local(None, return_type, "unreachableReturn"); + self.block.end_with_return(None, return_value) + } + } + + fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.gcc_add(a, b) + } + + fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a + b + } + + fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.gcc_sub(a, b) + } + + fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a - b + } + + fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.gcc_mul(a, b) + } + + fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a * b + } + + fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.gcc_udiv(a, b) + } + + fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): poison if not exact. + let a_type = a.get_type().to_unsigned(self); + let a = self.gcc_int_cast(a, a_type); + let b_type = b.get_type().to_unsigned(self); + let b = self.gcc_int_cast(b, b_type); + a / b + } + + fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.gcc_sdiv(a, b) + } + + fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): poison if not exact. + // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they + // should be the same. 
+        let typ = a.get_type().to_signed(self);
+        let b = self.context.new_cast(None, b, typ);
+        a / b
+    }
+
+    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a / b
+    }
+
+    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_urem(a, b)
+    }
+
+    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_srem(a, b)
+    }
+
+    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): add check in libgccjit since using the binary operator % causes the following error:
+        // during RTL pass: expand
+        // libgccjit.so: error: in expmed_mode_index, at expmed.h:240
+        // 0x7f0101d58dc6 expmed_mode_index
+        //     ../../../gcc/gcc/expmed.h:240
+        // 0x7f0101d58e35 expmed_op_cost_ptr
+        //     ../../../gcc/gcc/expmed.h:262
+        // 0x7f0101d594a1 sdiv_cost_ptr
+        //     ../../../gcc/gcc/expmed.h:531
+        // 0x7f0101d594f3 sdiv_cost
+        //     ../../../gcc/gcc/expmed.h:549
+        // 0x7f0101d6af7e expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int, optab_methods)
+        //     ../../../gcc/gcc/expmed.cc:4356
+        // 0x7f0101d94f9e expand_expr_divmod
+        //     ../../../gcc/gcc/expr.cc:8929
+        // 0x7f0101d97a26 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
+        //     ../../../gcc/gcc/expr.cc:9566
+        // 0x7f0101bef6ef expand_gimple_stmt_1
+        //     ../../../gcc/gcc/cfgexpand.cc:3967
+        // 0x7f0101bef910 expand_gimple_stmt
+        //     ../../../gcc/gcc/cfgexpand.cc:4028
+        // 0x7f0101bf6ee7 expand_gimple_basic_block
+        //     ../../../gcc/gcc/cfgexpand.cc:6069
+        // 0x7f0101bf9194 execute
+        //     ../../../gcc/gcc/cfgexpand.cc:6795
+        if a.get_type().is_compatible_with(self.cx.float_type) {
+            let fmodf = self.context.get_builtin_function("fmodf");
+            // FIXME(antoyo): this seems to produce the wrong result.
+            return self.context.new_call(None, fmodf, &[a, b]);
+        }
+        assert_eq!(a.get_type().unqualified(), self.cx.double_type);
+
+        let fmod = self.context.get_builtin_function("fmod");
+        return self.context.new_call(None, fmod, &[a, b]);
+    }
+
+    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_shl(a, b)
+    }
+
+    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_lshr(a, b)
+    }
+
+    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check whether `>>` behaves as an arithmetic shift here.
+        // It seems to be one when the value is signed.
+        self.gcc_lshr(a, b)
+    }
+
+    fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_and(a, b)
+    }
+
+    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.gcc_or(a, b)
+    }
+
+    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_xor(a, b)
+    }
+
+    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_neg(a)
+    }
+
+    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+    }
+
+    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_not(a)
+    }
+
+    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a + b
+    }
+
+    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_add(a, b)
+    }
+
+    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a - b
+    }
+
+    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): should generate poison value?
+        self.gcc_sub(a, b)
+    }
+
+    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a * b
+    }
+
+    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a * b
+    }
+
+    fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs + rhs
+    }
+
+    fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs - rhs
+    }
+
+    fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs * rhs
+    }
+
+    fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs / rhs
+    }
+
+    fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        self.frem(lhs, rhs)
+    }
+
+    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+        self.gcc_checked_binop(oop, typ, lhs, rhs)
+    }
+
+    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
+        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
+        // Ideally, we shouldn't need to do this check.
+        let aligned_type =
+            if ty == self.cx.u128_type || ty == self.cx.i128_type {
+                ty
+            }
+            else {
+                ty.get_aligned(align.bytes())
+            };
+        // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
+        self.stack_var_count.set(self.stack_var_count.get() + 1);
+        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+    }
+
+    fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+        let block = self.llbb();
+        let function = block.get_function();
+        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
+        // the current basic block. Otherwise, it could be used in another basic block, causing a
+        // dereference after a drop, for instance.
+        // TODO(antoyo): handle align of the load instruction.
+        let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer());
+        let deref = ptr.dereference(None).to_rvalue();
+        unsafe { RETURN_VALUE_COUNT += 1 };
+        let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
+        block.add_assignment(None, loaded_value, deref);
+        loaded_value.to_rvalue()
+    }
+
+    fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use ty.
+        let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
+        ptr.dereference(None).to_rvalue()
+    }
+
+    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
+        // TODO(antoyo): use ty.
+        // TODO(antoyo): handle alignment.
+ let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes())); + let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); + + let volatile_const_void_ptr_type = self.context.new_type::<()>() + .make_const() + .make_volatile() + .make_pointer(); + let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); + self.context.new_call(None, atomic_load, &[ptr, ordering]) + } + + fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> { + assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + + if place.layout.is_zst() { + return OperandRef::new_zst(self, place.layout); + } + + fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) { + let vr = scalar.valid_range(bx); + match scalar.primitive() { + abi::Int(..) => { + if !scalar.is_always_valid(bx) { + bx.range_metadata(load, vr); + } + } + abi::Pointer(_) if vr.start < vr.end && !vr.contains(0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + } + + let val = + if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } + else if place.layout.is_gcc_immediate() { + let load = self.load( + place.layout.gcc_type(self), + place.llval, + place.align, + ); + if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + OperandValue::Immediate(self.to_immediate(load, place.layout)) + } + else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let b_offset = a.size(self).align_to(b.align(self).abi); + let pair_type = place.layout.gcc_type(self); + + let mut load = |i, scalar: &abi::Scalar, align| { + let llptr = self.struct_gep(pair_type, place.llval, i as u64); + let llty = place.layout.scalar_pair_element_gcc_type(self, i, false); + let load = self.load(llty, llptr, align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load } + }; + + OperandValue::Pair( + load(0, a, place.align), + load(1, b, place.align.restrict_for_offset(b_offset)), + ) + } + else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { val, layout: place.layout } + } + + fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) { + let zero = self.const_usize(0); + let count = self.const_usize(count); + let start = dest.project_index(self, zero).llval; + let end = dest.project_index(self, count).llval; + + let header_bb = self.append_sibling_block("repeat_loop_header"); + let body_bb = self.append_sibling_block("repeat_loop_body"); + let next_bb = self.append_sibling_block("repeat_loop_next"); + + let ptr_type = start.get_type(); + let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var"); + let current_val = current.to_rvalue(); + self.assign(current, start); + + self.br(header_bb); + + self.switch_to_block(header_bb); + let keep_going = self.icmp(IntPredicate::IntNE, current_val, end); + self.cond_br(keep_going, body_bb, next_bb); + + self.switch_to_block(body_bb); + let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); + cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); + + let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]); + self.llbb().add_assignment(None, current, next); + 
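// NOTE: the blocks built above implement the following loop; `loop_var` walks the
+        // destination element by element until it reaches `end`:
+        //     current = start;
+        //     while (current != end) {
+        //         *current = cg_elem;
+        //         current += 1;
+        //     }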
+        self.br(header_bb);
+
+        self.switch_to_block(next_bb);
+    }
+
+    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
+        // TODO(antoyo)
+    }
+
+    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+        self.store_with_flags(val, ptr, align, MemFlags::empty())
+    }
+
+    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
+        let ptr = self.check_store(val, ptr);
+        let destination = ptr.dereference(None);
+        // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
+        // the pointer to a pointer type with the proper alignment.
+        let destination_type = destination.to_rvalue().get_type().unqualified();
+        let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
+        let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
+        let aligned_destination = aligned_destination.dereference(None);
+        self.llbb().add_assignment(None, aligned_destination, val);
+        // TODO(antoyo): handle align and flags.
+        // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
+        self.cx.context.new_rvalue_zero(self.type_i32())
+    }
+
+    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
+        // TODO(antoyo): handle alignment.
+        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
+        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let volatile_const_void_ptr_type = self.context.new_type::<()>()
+            .make_volatile()
+            .make_pointer();
+        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+
+        // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
+        // the following cast is required to avoid this error:
+        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
+        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
+        let value = self.context.new_cast(None, value, int_type);
+        self.llbb()
+            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+    }
+
+    fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+        let ptr_type = ptr.get_type();
+        let mut pointee_type = ptr.get_type();
+        // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
+        // always considered in bounds in GCC (TODO(antoyo): to be verified).
+        // So, we have to cast to a number.
+        let mut result = self.context.new_bitcast(None, ptr, self.sizet_type);
+        // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would
+        // require dereferencing the pointer.
+        for index in indices {
+            pointee_type = pointee_type.get_pointee().expect("pointee type");
+            let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
+            result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type);
+        }
+        self.context.new_bitcast(None, result, ptr_type)
+    }
+
+    fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+        // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
+        let mut indices = indices.into_iter();
+        let index = indices.next().expect("first index in inbounds_gep");
+        let mut result = self.context.new_array_access(None, ptr, *index);
+        for index in indices {
+            result = self.context.new_array_access(None, result, *index);
+        }
+        result.get_address(None)
+    }
+
+    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value = ptr.dereference(None).to_rvalue();
+
+        if value_type.dyncast_array().is_some() {
+            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(None, value, index);
+            element.get_address(None)
+        }
+        else if let Some(vector_type) = value_type.dyncast_vector() {
+            let array_type = vector_type.get_element_type().make_pointer();
+            let array = self.bitcast(ptr, array_type);
+            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(None, array, index);
+            element.get_address(None)
+        }
+        else if let Some(struct_type) = value_type.is_struct() {
+            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
+        }
+        else {
+            panic!("Unexpected type {:?}", value_type);
+        }
+    }
+
+    /* Casts */
+    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check that it indeed truncates the value.
+        self.gcc_int_cast(value, dest_ty)
+    }
+
+    fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check that it indeed sign extends the value.
+        if dest_ty.dyncast_vector().is_some() {
+            // TODO(antoyo): nothing to do as it is only for LLVM?
+            return value;
+        }
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.gcc_float_to_uint_cast(value, dest_ty)
+    }
+
+    fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.gcc_float_to_int_cast(value, dest_ty)
+    }
+
+    fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.gcc_uint_to_float_cast(value, dest_ty)
+    }
+
+    fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.gcc_int_to_float_cast(value, dest_ty)
+    }
+
+    fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): make sure it truncates.
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
+        self.intcast(usize_value, dest_ty, false)
+    }
+
+    fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let usize_value = self.intcast(value, self.cx.type_isize(), false);
+        self.cx.const_bitcast(usize_value, dest_ty)
+    }
+
+    fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.cx.const_bitcast(value, dest_ty)
+    }
+
+    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+        // NOTE: is_signed is for value, not dest_typ.
+ self.gcc_int_cast(value, dest_typ) + } + + fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + let val_type = value.get_type(); + match (type_is_pointer(val_type), type_is_pointer(dest_ty)) { + (false, true) => { + // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to + // a pointer, which is not supported by gccjit. + return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty); + }, + (false, false) => { + // When they are not pointers, we want a transmute (or reinterpret_cast). + self.bitcast(value, dest_ty) + }, + (true, true) => self.cx.context.new_cast(None, value, dest_ty), + (true, false) => unimplemented!(), + } + } + + /* Comparisons */ + fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + self.gcc_icmp(op, lhs, rhs) + } + + fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + } + + /* Miscellaneous instructions */ + fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) { + assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported"); + let size = self.intcast(size, self.type_size_t(), false); + let _is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_ptr_to(self.type_void())); + let memcpy = self.context.get_builtin_function("memcpy"); + // TODO(antoyo): handle aligns and is_volatile. + self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); + } + + fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memmove. + let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_size_t(), false); + let _is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_ptr_to(self.type_void())); + + let memmove = self.context.get_builtin_function("memmove"); + // TODO(antoyo): handle is_volatile. + self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size])); + } + + fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) { + let _is_volatile = flags.contains(MemFlags::VOLATILE); + let ptr = self.pointercast(ptr, self.type_i8p()); + let memset = self.context.get_builtin_function("memset"); + // TODO(antoyo): handle align and is_volatile. 
+        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+        let size = self.intcast(size, self.type_size_t(), false);
+        self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+    }
+
+    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+        let func = self.current_func();
+        let variable = func.new_local(None, then_val.get_type(), "selectVar");
+        let then_block = func.new_block("then");
+        let else_block = func.new_block("else");
+        let after_block = func.new_block("after");
+        self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+        then_block.add_assignment(None, variable, then_val);
+        then_block.end_with_jump(None, after_block);
+
+        if !then_val.get_type().is_compatible_with(else_val.get_type()) {
+            else_val = self.context.new_cast(None, else_val, then_val.get_type());
+        }
+        else_block.add_assignment(None, variable, else_val);
+        else_block.end_with_jump(None, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+        // state needs to be updated.
+        self.switch_to_block(after_block);
+
+        variable.to_rvalue()
+    }
+
+    #[allow(dead_code)]
+    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    #[cfg(feature="master")]
+    fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
+        self.context.new_vector_access(None, vec, idx).to_rvalue()
+    }
+
+    #[cfg(not(feature="master"))]
+    fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
+        let vector_type = vec.get_type().unqualified().dyncast_vector().expect("Called extract_element on a non-vector type");
+        let element_type = vector_type.get_element_type();
+        let vec_num_units = vector_type.get_num_units();
+        let array_type = self.context.new_array_type(None, element_type, vec_num_units as u64);
+        let array = self.context.new_bitcast(None, vec, array_type).to_rvalue();
+        self.context.new_array_access(None, array, idx).to_rvalue()
+    }
+
+    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+ assert_eq!(idx as usize as u64, idx); + let value_type = aggregate_value.get_type(); + + if value_type.dyncast_array().is_some() { + let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + let element = self.context.new_array_access(None, aggregate_value, index); + element.get_address(None) + } + else if value_type.dyncast_vector().is_some() { + panic!(); + } + else if let Some(pointer_type) = value_type.get_pointee() { + if let Some(struct_type) = pointer_type.is_struct() { + // NOTE: hack to workaround a limitation of the rustc API: see comment on + // CodegenCx.structs_as_pointer + aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue() + } + else { + panic!("Unexpected type {:?}", value_type); + } + } + else if let Some(struct_type) = value_type.is_struct() { + aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue() + } + else { + panic!("Unexpected type {:?}", value_type); + } + } + + fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { + // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. + assert_eq!(idx as usize as u64, idx); + let value_type = aggregate_value.get_type(); + + let lvalue = + if value_type.dyncast_array().is_some() { + let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + self.context.new_array_access(None, aggregate_value, index) + } + else if value_type.dyncast_vector().is_some() { + panic!(); + } + else if let Some(pointer_type) = value_type.get_pointee() { + if let Some(struct_type) = pointer_type.is_struct() { + // NOTE: hack to workaround a limitation of the rustc API: see comment on + // CodegenCx.structs_as_pointer + aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)) + } + else { + panic!("Unexpected type {:?}", value_type); + } + } + else { + panic!("Unexpected type {:?}", value_type); + }; + + let lvalue_type = lvalue.to_rvalue().get_type(); + let value = + // NOTE: sometimes, rustc will create a value with the wrong type. + if lvalue_type != value.get_type() { + self.context.new_cast(None, value, lvalue_type) + } + else { + value + }; + + self.llbb().add_assignment(None, lvalue, value); + + aggregate_value + } + + fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { + #[cfg(feature="master")] + { + let personality = self.rvalue_as_function(_personality); + self.current_func().set_personality_function(personality); + } + } + + #[cfg(feature="master")] + fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + self.set_personality_fn(pers_fn); + + // NOTE: insert the current block in a variable so that a later call to invoke knows to + // generate a try/finally instead of a try/catch for this block. + self.cleanup_blocks.borrow_mut().insert(self.block); + + let eh_pointer_builtin = self.cx.context.get_target_builtin_function("__builtin_eh_pointer"); + let zero = self.cx.context.new_rvalue_zero(self.int_type); + let ptr = self.cx.context.new_call(None, eh_pointer_builtin, &[zero]); + + let value1_type = self.u8_type.make_pointer(); + let ptr = self.cx.context.new_cast(None, ptr, value1_type); + let value1 = ptr; + let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?). 
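+        // NOTE: this pair mirrors the { exception pointer, type id selector } aggregate that an
+        // LLVM landing pad yields; only the exception pointer is filled in for now.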
+ + (value1, value2) + } + + #[cfg(not(feature="master"))] + fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + let value1 = self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0") + .to_rvalue(); + let value2 = self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue(); + (value1, value2) + } + + #[cfg(feature="master")] + fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { + let exn_type = exn0.get_type(); + let exn = self.context.new_cast(None, exn0, exn_type); + let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume"); + self.llbb().add_eval(None, self.context.new_call(None, unwind_resume, &[exn])); + self.unreachable(); + } + + #[cfg(not(feature="master"))] + fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { + self.unreachable(); + } + + fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet { + unimplemented!(); + } + + fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) { + unimplemented!(); + } + + fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet { + unimplemented!(); + } + + fn catch_switch( + &mut self, + _parent: Option<RValue<'gcc>>, + _unwind: Option<Block<'gcc>>, + _handlers: &[Block<'gcc>], + ) -> RValue<'gcc> { + unimplemented!(); + } + + // Atomic Operations + fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { + let expected = self.current_func().new_local(None, cmp.get_type(), "expected"); + self.llbb().add_assignment(None, expected, cmp); + // NOTE: gcc doesn't support a failure memory model that is stronger than the success + // memory model. + let order = + if failure_order as i32 > order as i32 { + failure_order + } + else { + order + }; + let success = self.compare_exchange(dst, expected, src, order, failure_order, weak); + + let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); + let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result"); + let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align. + + let value_type = result.to_rvalue().get_type(); + if let Some(struct_type) = value_type.is_struct() { + self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align); + // NOTE: since success contains the call to the intrinsic, it must be stored before + // expected so that we store expected after the call. + self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align); + } + // TODO(antoyo): handle when value is not a struct. 
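+        // NOTE: the pair built above has the same shape as LLVM's cmpxchg result:
+        // field 0 holds the value that was loaded from `dst` and field 1 the success flag.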
+
+        result.to_rvalue()
+    }
+
+    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = src.get_type().get_size();
+        let name =
+            match op {
+                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+            };
+
+        let atomic_function = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        // FIXME(antoyo): not sure why, but we have the wrong type here.
+        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+        let src = self.context.new_cast(None, src, new_src_type);
+        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
+        self.context.new_cast(None, res, src.get_type())
+    }
+
+    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+        let name =
+            match scope {
+                SynchronizationScope::SingleThread => "__atomic_signal_fence",
+                SynchronizationScope::CrossThread => "__atomic_thread_fence",
+            };
+        let thread_fence = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+    }
+
+    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+        // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
+        self.normal_function_addresses.borrow_mut().insert(load);
+        // TODO(antoyo)
+    }
+
+    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO(antoyo)
+    }
+
+    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO(antoyo)
+    }
+
+    fn call(
+        &mut self,
+        _typ: Type<'gcc>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
+        // FIXME(antoyo): remove when having a proper API.
+        let gcc_func = unsafe { std::mem::transmute(func) };
+        let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
+            self.function_call(func, args, funclet)
+        }
+        else {
+            // If it's not a function that was defined, it's a function pointer.
+            self.function_ptr_call(func, args, funclet)
+        };
+        if let Some(_fn_abi) = fn_abi {
+            // TODO(bjorn3): Apply function attributes
+        }
+        call
+    }
+
+    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): this does not zero-extend.
+ if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) { + // FIXME(antoyo): hack because base::from_immediate converts i1 to i8. + // Fix the code in codegen_ssa::base::from_immediate. + return value; + } + self.gcc_int_cast(value, dest_typ) + } + + fn cx(&self) -> &CodegenCx<'gcc, 'tcx> { + self.cx + } + + fn do_not_inline(&mut self, _llret: RValue<'gcc>) { + // FIXME(bjorn3): implement + } + + fn set_span(&mut self, _span: Span) {} + + fn from_immediate(&mut self, val: Self::Value) -> Self::Value { + if self.cx().val_ty(val) == self.cx().type_i1() { + self.zext(val, self.cx().type_i8()) + } + else { + val + } + } + + fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value { + if scalar.is_bool() { + return self.trunc(val, self.cx().type_i1()); + } + val + } + + fn fptoui_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.fptoint_sat(false, val, dest_ty) + } + + fn fptosi_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.fptoint_sat(true, val, dest_ty) + } + + fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) { + unimplemented!(); + } +} + +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { + fn fptoint_sat(&mut self, signed: bool, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + let src_ty = self.cx.val_ty(val); + let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector { + assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty)); + (self.cx.element_type(src_ty), self.cx.element_type(dest_ty)) + } else { + (src_ty, dest_ty) + }; + + // FIXME(jistone): the following was originally the fallback SSA implementation, before LLVM 13 + // added native `fptosi.sat` and `fptoui.sat` conversions, but it was used by GCC as well. + // Now that LLVM always relies on its own, the code has been moved to GCC, but the comments are + // still LLVM-specific. This should be updated, and use better GCC specifics if possible. + + let int_width = self.cx.int_width(int_ty); + let float_width = self.cx.float_width(float_ty); + // LLVM's fpto[su]i returns undef when the input val is infinite, NaN, or does not fit into the + // destination integer type after rounding towards zero. This `undef` value can cause UB in + // safe code (see issue #10184), so we implement a saturating conversion on top of it: + // Semantically, the mathematical value of the input is rounded towards zero to the next + // mathematical integer, and then the result is clamped into the range of the destination + // integer type. Positive and negative infinity are mapped to the maximum and minimum value of + // the destination integer type. NaN is mapped to 0. + // + // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to + // a value representable in int_ty. + // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits. + // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two. + // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly + // representable. Note that this only works if float_ty's exponent range is sufficiently large. + // f16 or 256 bit integers would break this property. Right now the smallest float type is f32 + // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127. + // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. 
Because + // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). + // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. + let int_max = |signed: bool, int_width: u64| -> u128 { + let shift_amount = 128 - int_width; + if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } + }; + let int_min = |signed: bool, int_width: u64| -> i128 { + if signed { i128::MIN >> (128 - int_width) } else { 0 } + }; + + let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = + ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = + ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = + ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = + ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + // To implement saturation, we perform the following steps: + // + // 1. Cast val to an integer with fpto[su]i. This may result in undef. + // 2. Compare val to f_min and f_max, and use the comparison results to select: + // a) int_ty::MIN if val < f_min or val is NaN + // b) int_ty::MAX if val > f_max + // c) the result of fpto[su]i otherwise + // 3. If val is NaN, return 0.0, otherwise return the result of step 2. + // + // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the + // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of + // undef does not introduce any non-determinism either. + // More importantly, the above procedure correctly implements saturating conversion. + // Proof (sketch): + // If val is NaN, 0 is returned by definition. + // Otherwise, val is finite or infinite and thus can be compared with f_min and f_max. + // This yields three cases to consider: + // (1) if val in [f_min, f_max], the result of fpto[su]i is returned, which agrees with + // saturating conversion for inputs in that range. + // (2) if val > f_max, then val is larger than int_ty::MAX. This holds even if f_max is rounded + // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger + // than int_ty::MAX. Because val is larger than int_ty::MAX, the return value of int_ty::MAX + // is correct. + // (3) if val < f_min, then val is smaller than int_ty::MIN. As shown earlier, f_min exactly equals + // int_ty::MIN and therefore the return value of int_ty::MIN is correct. + // QED. 
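+        // For example, for f32 -> i8 (signed, int_width == 8), both int_min == -128 and
+        // int_max == 127 are exactly representable in f32, so f_min == -128.0 and
+        // f_max == 127.0 and the clamp below is exact. For f32 -> u128, int_ty::MAX ==
+        // 2^128 - 1 is not representable, so f_max rounds down to f32::MAX and case (2)
+        // of the proof above applies to every value greater than it.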
+
+        let float_bits_to_llval = |bx: &mut Self, bits| {
+            let bits_llval = match float_width {
+                32 => bx.cx().const_u32(bits as u32),
+                64 => bx.cx().const_u64(bits as u64),
+                n => bug!("unsupported float width {}", n),
+            };
+            bx.bitcast(bits_llval, float_ty)
+        };
+        let (f_min, f_max) = match float_width {
+            32 => compute_clamp_bounds_single(signed, int_width),
+            64 => compute_clamp_bounds_double(signed, int_width),
+            n => bug!("unsupported float width {}", n),
+        };
+        let f_min = float_bits_to_llval(self, f_min);
+        let f_max = float_bits_to_llval(self, f_max);
+        let int_max = self.cx.const_uint_big(int_ty, int_max(signed, int_width));
+        let int_min = self.cx.const_uint_big(int_ty, int_min(signed, int_width) as u128);
+        let zero = self.cx.const_uint(int_ty, 0);
+
+        // If we're working with vectors, constants must be "splatted": the constant is duplicated
+        // into each lane of the vector. The algorithm stays the same, we are just using the
+        // same constant across all lanes.
+        let maybe_splat = |bx: &mut Self, val| {
+            if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
+                bx.vector_splat(bx.vector_length(dest_ty), val)
+            } else {
+                val
+            }
+        };
+        let f_min = maybe_splat(self, f_min);
+        let f_max = maybe_splat(self, f_max);
+        let int_max = maybe_splat(self, int_max);
+        let int_min = maybe_splat(self, int_min);
+        let zero = maybe_splat(self, zero);
+
+        // Step 1 ...
+        let fptosui_result = if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
+        let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min);
+        let greater = self.fcmp(RealPredicate::RealOGT, val, f_max);
+
+        // Step 2: We use two comparisons and two selects, with %s1 being the
+        // result:
+        //     %less_or_nan = fcmp ult %val, %f_min
+        //     %greater = fcmp ogt %val, %f_max
+        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+        //     %s1 = select %greater, int_ty::MAX, %s0
+        // Note that %less_or_nan uses an *unordered* comparison. This
+        // comparison is true if the operands are not comparable (i.e., if val is
+        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+        // val is NaN.
+        //
+        // Performance note: Unordered comparison can be lowered to a "flipped"
+        // comparison and a negation, and the negation can be merged into the
+        // select. Therefore, it is not necessarily any more expensive than an
+        // ordered ("normal") comparison. Whether these optimizations will be
+        // performed is ultimately up to the backend, but at least x86 does
+        // perform them.
+        let s0 = self.select(less_or_nan, int_min, fptosui_result);
+        let s1 = self.select(greater, int_max, s0);
+
+        // Step 3: NaN replacement.
+        // For unsigned types, the above step already yielded int_ty::MIN == 0 if val is NaN.
+        // Therefore we only need to execute this step for signed integer types.
+        if signed {
+            // LLVM has no isNaN predicate, so we use (val == val) instead
+            let cmp = self.fcmp(RealPredicate::RealOEQ, val, val);
+            self.select(cmp, s1, zero)
+        } else {
+            s1
+        }
+    }
+
+    #[cfg(feature="master")]
+    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+        let struct_type = mask.get_type().is_struct().expect("mask should be of struct type");
+
+        // TODO(antoyo): use a recursive unqualified() here.
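+        // What follows is easiest to see with concrete numbers (illustrative only, not
+        // from the source): given 4-lane inputs and a 2-lane mask struct { 0, 5 }, the
+        // mask is rebuilt as an integer vector, zero-padded to 4 lanes ({ 0, 5, 0, 0 }),
+        // and fed to `rvalue_vector_perm`, where lane i of the result takes element
+        // mask[i] from the concatenation of v1 and v2; the first 2 lanes of the permuted
+        // result are then copied out to form the final 2-lane value.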
+ let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_type = vector_type.get_element_type(); + let vec_num_units = vector_type.get_num_units(); + + let mask_num_units = struct_type.get_field_count(); + let mut vector_elements = vec![]; + let mask_element_type = + if element_type.is_integral() { + element_type + } + else { + #[cfg(feature="master")] + { + self.cx.type_ix(element_type.get_size() as u64 * 8) + } + #[cfg(not(feature="master"))] + self.int_type + }; + for i in 0..mask_num_units { + let field = struct_type.get_field(i as i32); + vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type)); + } + + // NOTE: the mask needs to be the same length as the input vectors, so add the missing + // elements in the mask if needed. + for _ in mask_num_units..vec_num_units { + vector_elements.push(self.context.new_rvalue_zero(mask_element_type)); + } + + let result_type = self.context.new_vector_type(element_type, mask_num_units as u64); + let (v1, v2) = + if vec_num_units < mask_num_units { + // NOTE: the mask needs to be the same length as the input vectors, so join the 2 + // vectors and create a dummy second vector. + let mut elements = vec![]; + for i in 0..vec_num_units { + elements.push(self.context.new_vector_access(None, v1, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + } + for i in 0..(mask_num_units - vec_num_units) { + elements.push(self.context.new_vector_access(None, v2, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + } + let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements); + let zero = self.context.new_rvalue_zero(element_type); + let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]); + (v1, v2) + } + else { + (v1, v2) + }; + + let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units); + let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64); + let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements); + let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask); + + if vec_num_units != mask_num_units { + // NOTE: if padding was added, only select the number of elements of the masks to + // remove that padding in the result. 
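+            // E.g. (continuing the illustration above): a 4-lane permutation driven by a
+            // 2-lane mask yields a 4-lane value, so lanes 0 and 1 are copied out below and
+            // the padding lanes are dropped.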
+ let mut elements = vec![]; + for i in 0..mask_num_units { + elements.push(self.context.new_vector_access(None, result, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + } + self.context.new_rvalue_from_vector(None, result_type, &elements) + } + else { + result + } + } + + #[cfg(not(feature="master"))] + pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + #[cfg(feature="master")] + pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc> + where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> + { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_type = vector_type.get_element_type(); + let mask_element_type = self.type_ix(element_type.get_size() as u64 * 8); + let element_count = vector_type.get_num_units(); + let mut vector_elements = vec![]; + for i in 0..element_count { + vector_elements.push(i); + } + let mask_type = self.context.new_vector_type(mask_element_type, element_count as u64); + let mut shift = 1; + let mut res = src; + while shift < element_count { + let vector_elements: Vec<_> = + vector_elements.iter() + .map(|i| self.context.new_rvalue_from_int(mask_element_type, ((i + shift) % element_count) as i32)) + .collect(); + let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements); + let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask); + shift *= 2; + res = op(res, shifted, &self.context); + } + self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type)) + .to_rvalue() + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc> + where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> + { + unimplemented!(); + } + + pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> { + self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b)) + } + + pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + #[cfg(feature="master")] + pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + (0..element_count).into_iter() + .map(|i| self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue()) + .fold(acc, |x, i| x + i) + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + (0..element_count).into_iter() + .map(|i| self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue()) + .fold(acc, |x, i| x * i) + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + 
unimplemented!() + } + + // Inspired by Hacker's Delight min implementation. + pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + self.vector_reduce(src, |a, b, context| { + let differences_or_zeros = difference_or_zero(a, b, context); + context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros) + }) + } + + // Inspired by Hacker's Delight max implementation. + pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + self.vector_reduce(src, |a, b, context| { + let differences_or_zeros = difference_or_zero(a, b, context); + context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros) + }) + } + + fn vector_extremum(&mut self, a: RValue<'gcc>, b: RValue<'gcc>, direction: ExtremumOperation) -> RValue<'gcc> { + let vector_type = a.get_type(); + + // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and + // b get compared & spliced together, we get the numeric values instead of NaNs. + let b_nan_mask = self.context.new_comparison(None, ComparisonOp::NotEquals, b, b); + let mask_type = b_nan_mask.get_type(); + let b_nan_mask_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, mask_type, b_nan_mask); + let a_cast = self.context.new_bitcast(None, a, mask_type); + let b_cast = self.context.new_bitcast(None, b, mask_type); + let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast); + let b = self.context.new_bitcast(None, res, vector_type); + + // now do the actual comparison + let comparison_op = match direction { + ExtremumOperation::Min => ComparisonOp::LessThan, + ExtremumOperation::Max => ComparisonOp::GreaterThan, + }; + let cmp = self.context.new_comparison(None, comparison_op, a, b); + let cmp_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, cmp.get_type(), cmp); + let res = (cmp & a_cast) | (cmp_inverted & res); + self.context.new_bitcast(None, res, vector_type) + } + + pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.vector_extremum(a, b, ExtremumOperation::Min) + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue(); + for i in 1..element_count { + let elem = self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue(); + let cmp = self.context.new_comparison(None, ComparisonOp::LessThan, acc, elem); + acc = self.select(cmp, acc, elem); + } + acc + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + pub fn vector_fmax(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.vector_extremum(a, b, ExtremumOperation::Max) + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue(); + for i in 1..element_count { + let elem = self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + 
.to_rvalue(); + let cmp = self.context.new_comparison(None, ComparisonOp::GreaterThan, acc, elem); + acc = self.select(cmp, acc, elem); + } + acc + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> { + // cond is a vector of integers, not of bools. + let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type"); + let num_units = vector_type.get_num_units(); + let element_type = vector_type.get_element_type(); + + #[cfg(feature="master")] + let (cond, element_type) = { + let then_val_vector_type = then_val.get_type().dyncast_vector().expect("vector type"); + let then_val_element_type = then_val_vector_type.get_element_type(); + let then_val_element_size = then_val_element_type.get_size(); + + // NOTE: the mask needs to be of the same size as the other arguments in order for the & + // operation to work. + if then_val_element_size != element_type.get_size() { + let new_element_type = self.type_ix(then_val_element_size as u64 * 8); + let new_vector_type = self.context.new_vector_type(new_element_type, num_units as u64); + let cond = self.context.convert_vector(None, cond, new_vector_type); + (cond, new_element_type) + } + else { + (cond, element_type) + } + }; + + let cond_type = cond.get_type(); + + let zeros = vec![self.context.new_rvalue_zero(element_type); num_units]; + let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros); + + let result_type = then_val.get_type(); + + let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros); + // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make + // the & operation work. + let then_val = self.bitcast_if_needed(then_val, masks.get_type()); + let then_vals = masks & then_val; + + let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units]; + let minus_ones = self.context.new_rvalue_from_vector(None, cond_type, &minus_ones); + let inverted_masks = masks ^ minus_ones; + // NOTE: sometimes, the type of else_val can be different than the type of then_val in + // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND + // operation to work. + // TODO: remove bitcast now that vector types can be compared? + let else_val = self.context.new_bitcast(None, else_val, then_val.get_type()); + let else_vals = inverted_masks & else_val; + + let res = then_vals | else_vals; + self.bitcast_if_needed(res, result_type) + } +} + +fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> { + let difference = a - b; + let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a); + // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make + // the & operation work. 
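+    // For intuition (illustrative values, integer lanes assumed): with a = 3, b = 5,
+    // `b >= a` gives an all-ones mask, so this returns a - b = -2 and the min reduction's
+    // `b + (-2)` yields 3; with a = 7, b = 5 the mask is zero, so `b + 0` keeps 5. The max
+    // reduction uses the same difference the other way around: `a - (a - b)` gives b when
+    // b >= a, and `a - 0` gives a otherwise.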
+    let a_type = a.get_type();
+    let masks =
+        if masks.get_type() != a_type {
+            context.new_bitcast(None, masks, a_type)
+        }
+        else {
+            masks
+        };
+    difference & masks
+}
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+        // Forward to the `get_static` method of `CodegenCx`
+        self.cx().get_static(def_id).get_address(None)
+    }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        self.cx.param_env()
+    }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.cx.target_spec()
+    }
+}
+
+pub trait ToGccComp {
+    fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
+impl ToGccComp for IntPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        match *self {
+            IntPredicate::IntEQ => ComparisonOp::Equals,
+            IntPredicate::IntNE => ComparisonOp::NotEquals,
+            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntULT => ComparisonOp::LessThan,
+            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntSLT => ComparisonOp::LessThan,
+            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
+        }
+    }
+}
+
+impl ToGccComp for RealPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        // TODO(antoyo): check that ordered vs non-ordered is respected.
+        match *self {
+            RealPredicate::RealPredicateFalse => unreachable!(),
+            RealPredicate::RealOEQ => ComparisonOp::Equals,
+            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
+            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
+            RealPredicate::RealOLT => ComparisonOp::LessThan,
+            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
+            RealPredicate::RealONE => ComparisonOp::NotEquals,
+            RealPredicate::RealORD => unreachable!(),
+            RealPredicate::RealUNO => unreachable!(),
+            RealPredicate::RealUEQ => ComparisonOp::Equals,
+            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
+            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
+            RealPredicate::RealULT => ComparisonOp::LessThan,
+            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
+            RealPredicate::RealUNE => ComparisonOp::NotEquals,
+            RealPredicate::RealPredicateTrue => unreachable!(),
+        }
+    }
+}
+
+#[repr(C)]
+#[allow(non_camel_case_types)]
+enum MemOrdering {
+    __ATOMIC_RELAXED,
+    __ATOMIC_CONSUME,
+    __ATOMIC_ACQUIRE,
+    __ATOMIC_RELEASE,
+    __ATOMIC_ACQ_REL,
+    __ATOMIC_SEQ_CST,
+}
+
+trait ToGccOrdering {
+    fn to_gcc(self) -> i32;
+}
+
+impl ToGccOrdering for AtomicOrdering {
+    fn to_gcc(self) -> i32 {
+        use MemOrdering::*;
+
+        let ordering =
+            match self {
+                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+                AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
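+                // NOTE: mapping LLVM's `Unordered` to `__ATOMIC_RELAXED` is at least sound:
+                // relaxed is no weaker than unordered, so this can only over-synchronize
+                // (this reading is an assumption, hence the TODO above).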
+ AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, + AtomicOrdering::Release => __ATOMIC_RELEASE, + AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL, + AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST, + }; + ordering as i32 + } +} diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs new file mode 100644 index 00000000000..ba1e8656208 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/callee.rs @@ -0,0 +1,174 @@ +#[cfg(feature="master")] +use gccjit::{FnAttribute, Visibility}; +use gccjit::{FunctionType, Function}; +use rustc_middle::ty::{self, Instance, TypeVisitableExt}; +use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; + +use crate::attributes; +use crate::context::CodegenCx; + +/// Codegens a reference to a fn/method item, monomorphizing and +/// inlining as it goes. +/// +/// # Parameters +/// +/// - `cx`: the crate context +/// - `instance`: the instance to be instantiated +pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> { + let tcx = cx.tcx(); + + assert!(!instance.substs.needs_infer()); + assert!(!instance.substs.has_escaping_bound_vars()); + + let sym = tcx.symbol_name(instance).name; + + if let Some(&func) = cx.function_instances.borrow().get(&instance) { + return func; + } + + let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); + + let func = + if let Some(_func) = cx.get_declared_value(&sym) { + // FIXME(antoyo): we never reach this because get_declared_value only returns global variables + // and here we try to get a function. + unreachable!(); + /* + // Create a fn pointer with the new signature. + let ptrty = fn_abi.ptr_to_gcc_type(cx); + + // This is subtle and surprising, but sometimes we have to bitcast + // the resulting fn pointer. The reason has to do with external + // functions. If you have two crates that both bind the same C + // library, they may not use precisely the same types: for + // example, they will probably each declare their own structs, + // which are distinct types from LLVM's point of view (nominal + // types). + // + // Now, if those two crates are linked into an application, and + // they contain inlined code, you can wind up with a situation + // where both of those functions wind up being loaded into this + // application simultaneously. In that case, the same function + // (from LLVM's point of view) requires two types. But of course + // LLVM won't allow one function to have two types. + // + // What we currently do, therefore, is declare the function with + // one of the two types (whichever happens to come first) and then + // bitcast as needed when the function is referenced to make sure + // it has the type we expect. + // + // This can occur on either a crate-local or crate-external + // reference. It also occurs when testing libcore and in some + // other weird situations. Annoying. + if cx.val_ty(func) != ptrty { + // TODO(antoyo): cast the pointer. + func + } + else { + func + }*/ + } + else { + cx.linkage.set(FunctionType::Extern); + let func = cx.declare_fn(&sym, &fn_abi); + + attributes::from_fn_attrs(cx, func, instance); + + let instance_def_id = instance.def_id(); + + // TODO(antoyo): set linkage and attributes. + + // Apply an appropriate linkage/visibility value to our item that we + // just declared. + // + // This is sort of subtle. Inside our codegen unit we started off + // compilation by predefining all our own `MonoItem` instances. That + // is, everything we're codegenning ourselves is already defined. 
That
+            // means that anything we're actually codegenning in this codegen unit
+            // will have hit the above branch in `get_declared_value`. As a result,
+            // we're guaranteed here that we're declaring a symbol that won't get
+            // defined, or in other words we're referencing a value from another
+            // codegen unit or even another crate.
+            //
+            // So because this is a foreign value we blanket apply an external
+            // linkage directive because it's coming from a different object file.
+            // The visibility here is where it gets tricky. This symbol could be
+            // referencing some foreign crate or foreign library (an `extern`
+            // block) in which case we want to leave the default visibility. We may
+            // also, though, have multiple codegen units. It could be a
+            // monomorphization, in which case its expected visibility depends on
+            // whether we are sharing generics or not. The important thing here is
+            // that the visibility we apply to the declaration is the same one that
+            // has been applied to the definition (wherever that definition may be).
+            let is_generic = instance.substs.non_erasable_generics().next().is_some();
+
+            if is_generic {
+                // This is a monomorphization. Its expected visibility depends
+                // on whether we are in share-generics mode.
+
+                if cx.tcx.sess.opts.share_generics() {
+                    // We are in share_generics mode.
+
+                    if let Some(instance_def_id) = instance_def_id.as_local() {
+                        // This is a definition from the current crate. If the
+                        // definition is unreachable for downstream crates or
+                        // the current crate does not re-export generics, the
+                        // definition of the instance will have been declared
+                        // as `hidden`.
+                        if cx.tcx.is_unreachable_local_definition(instance_def_id)
+                            || !cx.tcx.local_crate_exports_generics()
+                        {
+                            #[cfg(feature="master")]
+                            func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+                        }
+                    } else {
+                        // This is a monomorphization of a generic function
+                        // defined in an upstream crate.
+                        if instance.upstream_monomorphization(tcx).is_some() {
+                            // This is instantiated in another crate. It cannot
+                            // be `hidden`.
+                        } else {
+                            // This is a local instantiation of an upstream definition.
+                            // If the current crate does not re-export it
+                            // (because it is a C library or an executable), it
+                            // will have been declared `hidden`.
+                            if !cx.tcx.local_crate_exports_generics() {
+                                #[cfg(feature="master")]
+                                func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+                            }
+                        }
+                    }
+                } else {
+                    // When not sharing generics, all instances are in the same
+                    // crate and have hidden visibility.
+                    #[cfg(feature="master")]
+                    func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+                }
+            } else {
+                // This is a non-generic function.
+                if cx.tcx.is_codegened_item(instance_def_id) {
+                    // This is a function that is instantiated in the local crate.
+
+                    if instance_def_id.is_local() {
+                        // This is a function that is defined in the local crate.
+                        // If it is not reachable, it is hidden.
+                        if !cx.tcx.is_reachable_non_generic(instance_def_id) {
+                            #[cfg(feature="master")]
+                            func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+                        }
+                    } else {
+                        // This is a function from an upstream crate that has
+                        // been instantiated here. These are always hidden.
+ #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } + } + + func + }; + + cx.function_instances.borrow_mut().insert(instance, func); + + func +} diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs new file mode 100644 index 00000000000..76fc7bd222e --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -0,0 +1,485 @@ +use gccjit::LValue; +use gccjit::{RValue, Type, ToRValue}; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::traits::{ + BaseTypeMethods, + ConstMethods, + DerivedTypeMethods, + MiscMethods, + StaticMethods, +}; +use rustc_middle::mir::Mutability; +use rustc_middle::ty::layout::{TyAndLayout, LayoutOf}; +use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; +use rustc_target::abi::{self, HasDataLayout, Pointer, Size}; + +use crate::consts::const_alloc_to_gcc; +use crate::context::CodegenCx; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> { + bytes_in_context(self, bytes) + } + + fn global_string(&self, string: &str) -> LValue<'gcc> { + // TODO(antoyo): handle non-null-terminated strings. + let string = self.context.new_string_literal(&*string); + let sym = self.generate_local_symbol_name("str"); + let global = self.declare_private_global(&sym, self.val_ty(string)); + global.global_set_initializer_rvalue(string); + global + // TODO(antoyo): set linkage. + } +} + +pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { + let context = &cx.context; + let byte_type = context.new_type::<u8>(); + let typ = context.new_array_type(None, byte_type, bytes.len() as u64); + let elements: Vec<_> = + bytes.iter() + .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)) + .collect(); + context.new_array_constructor(None, typ, &elements) +} + +pub fn type_is_pointer(typ: Type<'_>) -> bool { + typ.get_pointee().is_some() +} + +impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> { + if type_is_pointer(typ) { + self.context.new_null(typ) + } + else { + self.const_int(typ, 0) + } + } + + fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> { + let local = self.current_func.borrow().expect("func") + .new_local(None, typ, "undefined"); + if typ.is_struct().is_some() { + // NOTE: hack to workaround a limitation of the rustc API: see comment on + // CodegenCx.structs_as_pointer + let pointer = local.get_address(None); + self.structs_as_pointer.borrow_mut().insert(pointer); + pointer + } + else { + local.to_rvalue() + } + } + + fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { + self.gcc_int(typ, int) + } + + fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> { + self.gcc_uint(typ, int) + } + + fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { + self.gcc_uint_big(typ, num) + } + + fn const_bool(&self, val: bool) -> RValue<'gcc> { + self.const_uint(self.type_i1(), val as u64) + } + + fn const_i16(&self, i: i16) -> RValue<'gcc> { + self.const_int(self.type_i16(), i as i64) + } + + fn const_i32(&self, i: i32) -> RValue<'gcc> { + self.const_int(self.type_i32(), i as i64) + } + + fn const_u32(&self, i: u32) -> RValue<'gcc> { + self.const_uint(self.type_u32(), i as u64) + } + + fn const_u64(&self, i: u64) -> RValue<'gcc> { + self.const_uint(self.type_u64(), i) + } + + fn const_usize(&self, i: u64) -> 
RValue<'gcc> { + let bit_size = self.data_layout().pointer_size.bits(); + if bit_size < 64 { + // make sure it doesn't overflow + assert!(i < (1 << bit_size)); + } + + self.const_uint(self.usize_type, i) + } + + fn const_u8(&self, i: u8) -> RValue<'gcc> { + self.const_uint(self.type_u8(), i as u64) + } + + fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> { + self.context.new_rvalue_from_double(typ, val) + } + + fn const_str(&self, s: &str) -> (RValue<'gcc>, RValue<'gcc>) { + let str_global = *self + .const_str_cache + .borrow_mut() + .raw_entry_mut() + .from_key(s) + .or_insert_with(|| (s.to_owned(), self.global_string(s))) + .1; + let len = s.len(); + let cs = self.const_ptrcast(str_global.get_address(None), + self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self)), + ); + (cs, self.const_usize(len as u64)) + } + + fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> { + let fields: Vec<_> = values.iter() + .map(|value| value.get_type()) + .collect(); + // TODO(antoyo): cache the type? It's anonymous, so probably not. + let typ = self.type_struct(&fields, packed); + let struct_type = typ.is_struct().expect("struct type"); + self.context.new_struct_constructor(None, struct_type.as_type(), None, values) + } + + fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> { + // TODO(antoyo) + None + } + + fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> { + // TODO(antoyo) + None + } + + fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { + let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; + match cv { + Scalar::Int(int) => { + let data = int.assert_bits(layout.size(self)); + + // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code + // the paths for floating-point values. + if ty == self.float_type { + return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64); + } + else if ty == self.double_type { + return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64)); + } + + let value = self.const_uint_big(self.type_ix(bitsize), data); + let bytesize = layout.size(self).bytes(); + if bitsize > 1 && ty.is_integral() && bytesize as u32 == ty.get_size() { + // NOTE: since the intrinsic _xabort is called with a bitcast, which + // is non-const, but expects a constant, do a normal cast instead of a bitcast. + // FIXME(antoyo): fix bitcast to work in constant contexts. + // TODO(antoyo): perhaps only use bitcast for pointers? + self.context.new_cast(None, value, ty) + } + else { + // TODO(bjorn3): assert size is correct + self.const_bitcast(value, ty) + } + } + Scalar::Ptr(ptr, _size) => { + let (alloc_id, offset) = ptr.into_parts(); + let base_addr = + match self.tcx.global_alloc(alloc_id) { + GlobalAlloc::Memory(alloc) => { + let init = const_alloc_to_gcc(self, alloc); + let alloc = alloc.inner(); + let value = + match alloc.mutability { + Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), + _ => self.static_addr_of(init, alloc.align, None), + }; + if !self.sess().fewer_names() { + // TODO(antoyo): set value name. 
+ } + value + }, + GlobalAlloc::Function(fn_instance) => { + self.get_fn_addr(fn_instance) + }, + GlobalAlloc::VTable(ty, trait_ref) => { + let alloc = self.tcx.global_alloc(self.tcx.vtable_allocation((ty, trait_ref))).unwrap_memory(); + let init = const_alloc_to_gcc(self, alloc); + self.static_addr_of(init, alloc.inner().align, None) + } + GlobalAlloc::Static(def_id) => { + assert!(self.tcx.is_static(def_id)); + self.get_static(def_id).get_address(None) + }, + }; + let ptr_type = base_addr.get_type(); + let base_addr = self.const_bitcast(base_addr, self.usize_type); + let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); + let ptr = self.const_bitcast(base_addr + offset, ptr_type); + if !matches!(layout.primitive(), Pointer(_)) { + self.const_bitcast(ptr.dereference(None).to_rvalue(), ty) + } + else { + self.const_bitcast(ptr, ty) + } + } + } + } + + fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value { + const_alloc_to_gcc(self, alloc) + } + + fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { + assert_eq!(alloc.inner().align, layout.align.abi); + let ty = self.type_ptr_to(layout.gcc_type(self)); + let value = + if layout.size == Size::ZERO { + let value = self.const_usize(alloc.inner().align.bytes()); + self.const_bitcast(value, ty) + } + else { + let init = const_alloc_to_gcc(self, alloc); + let base_addr = self.static_addr_of(init, alloc.inner().align, None); + + let array = self.const_bitcast(base_addr, self.type_i8p()); + let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None); + self.const_bitcast(value, ty) + }; + PlaceRef::new_sized(value, layout) + } + + fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { + self.context.new_cast(None, val, ty) + } +} + +pub trait SignType<'gcc, 'tcx> { + fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; + fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; +} + +impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> { + fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx) + } + + fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx) + } + + fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if self.is_u8(cx) { + cx.i8_type + } + else if self.is_u16(cx) { + cx.i16_type + } + else if self.is_u32(cx) { + cx.i32_type + } + else if self.is_u64(cx) { + cx.i64_type + } + else if self.is_u128(cx) { + cx.i128_type + } + else if self.is_uchar(cx) { + cx.char_type + } + else if self.is_ushort(cx) { + cx.short_type + } + else if self.is_uint(cx) { + cx.int_type + } + else if self.is_ulong(cx) { + cx.long_type + } + else if self.is_ulonglong(cx) { + cx.longlong_type + } + else { + self.clone() + } + } + + fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if self.is_i8(cx) { + cx.u8_type + } + else if self.is_i16(cx) { + cx.u16_type + } + else if self.is_i32(cx) { + cx.u32_type + } + else if self.is_i64(cx) { + cx.u64_type + } + else if self.is_i128(cx) { + cx.u128_type + } + else if self.is_char(cx) { + cx.uchar_type + } + else if self.is_short(cx) { + cx.ushort_type + } + else 
if self.is_int(cx) { + cx.uint_type + } + else if self.is_long(cx) { + cx.ulong_type + } + else if self.is_longlong(cx) { + cx.ulonglong_type + } + else { + self.clone() + } + } +} + +pub trait TypeReflection<'gcc, 'tcx> { + fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + + fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + + fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + + fn is_vector(&self) -> bool; +} + +impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> { + fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.uchar_type + } + + fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.ushort_type + } + + fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.uint_type + } + + fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.ulong_type + } + + fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.ulonglong_type + } + + fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.char_type + } + + fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.short_type + } + + fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.int_type + } + + fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.long_type + } + + fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.longlong_type + } + + fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.i8_type + } + + fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.u8_type + } + + fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.i16_type + } + + fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.u16_type + } + + fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.i32_type + } + + fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.u32_type + } + + fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.i64_type + } + + fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.u64_type + } + + fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.i128_type.unqualified() + } + + fn is_u128(&self, cx: 
&CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.u128_type.unqualified() + } + + fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.context.new_type::<f32>() + } + + fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.unqualified() == cx.context.new_type::<f64>() + } + + fn is_vector(&self) -> bool { + let mut typ = self.clone(); + loop { + if typ.dyncast_vector().is_some() { + return true; + } + + let old_type = typ; + typ = typ.unqualified(); + if old_type == typ { + break; + } + } + + false + } +} diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs new file mode 100644 index 00000000000..792ab8f890d --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -0,0 +1,392 @@ +#[cfg(feature = "master")] +use gccjit::FnAttribute; +use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue, Type}; +use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; +use rustc_middle::span_bug; +use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; +use rustc_middle::mir::mono::MonoItem; +use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::ty::layout::LayoutOf; +use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; +use rustc_span::def_id::DefId; +use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange}; + +use crate::base; +use crate::context::CodegenCx; +use crate::errors::InvalidMinimumAlignment; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { + if value.get_type() == self.bool_type.make_pointer() { + if let Some(pointee) = typ.get_pointee() { + if pointee.dyncast_vector().is_some() { + panic!() + } + } + } + // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some + // SIMD builtins require a constant value. + self.bitcast_if_needed(value, typ) + } +} + +fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) { + // The target may require greater alignment for globals than the type does. + // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, + // which can force it to be smaller. Rust doesn't support this yet. 
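+    // For example (target detail assumed here, for illustration): s390x sets
+    // `min_global_align` because some instructions, such as `larl`, require even
+    // symbol addresses, so even a `u8` static is given at least 2-byte alignment.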
+    if let Some(min) = cx.sess().target.min_global_align {
+        match Align::from_bits(min) {
+            Ok(min) => align = align.max(min),
+            Err(err) => {
+                cx.sess().emit_err(InvalidMinimumAlignment { err });
+            }
+        }
+    }
+    gv.set_alignment(align.bytes() as i32);
+}
+
+impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
+    fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+        // TODO(antoyo): implement a proper rvalue comparison in libgccjit instead of doing the
+        // following:
+        for (value, variable) in &*self.const_globals.borrow() {
+            if format!("{:?}", value) == format!("{:?}", cv) {
+                if let Some(global_variable) = self.global_lvalues.borrow().get(variable) {
+                    let alignment = align.bits() as i32;
+                    if alignment > global_variable.get_alignment() {
+                        global_variable.set_alignment(alignment);
+                    }
+                }
+                return *variable;
+            }
+        }
+        let global_value = self.static_addr_of_mut(cv, align, kind);
+        #[cfg(feature = "master")]
+        self.global_lvalues.borrow().get(&global_value)
+            .expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`")
+            .global_set_readonly();
+        self.const_globals.borrow_mut().insert(cv, global_value);
+        global_value
+    }
+
+    fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+        let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+        let value =
+            match codegen_static_initializer(&self, def_id) {
+                Ok((value, _)) => value,
+                // Error has already been reported
+                Err(_) => return,
+            };
+
+        let global = self.get_static(def_id);
+
+        // Boolean SSA values are i1, but they have to be stored in i8 slots,
+        // otherwise some LLVM optimization passes don't work as expected.
+        let val_llty = self.val_ty(value);
+        let value =
+            if val_llty == self.type_i1() {
+                unimplemented!();
+            }
+            else {
+                value
+            };
+
+        let instance = Instance::mono(self.tcx, def_id);
+        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+        let gcc_type = self.layout_of(ty).gcc_type(self);
+
+        set_global_alignment(self, global, self.align_of(ty));
+
+        let value = self.bitcast_if_needed(value, gcc_type);
+        global.global_set_initializer_rvalue(value);
+
+        // As an optimization, all shared statics which do not have interior
+        // mutability are placed into read-only memory.
+        if !is_mutable {
+            if self.type_is_freeze(ty) {
+                #[cfg(feature = "master")]
+                global.global_set_readonly();
+            }
+        }
+
+        if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+            // Do not allow LLVM to change the alignment of a TLS on macOS.
+            //
+            // By default a global's alignment can be freely increased.
+            // This allows LLVM to generate more performant instructions
+            // e.g., using load-aligned into a SIMD register.
+            //
+            // However, on macOS 10.10 or below, the dynamic linker does not
+            // respect any alignment given on the TLS (radar 24221680).
+            // This will violate the alignment assumption, causing segfaults at runtime.
+            //
+            // This bug is very easy to trigger. In `println!` and `panic!`,
+            // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+            // in which the values are `mem::replace`d on initialization.
+            // The implementation of `mem::replace` will use SIMD
+            // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+            // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+            // which macOS's dyld disregards, causing crashes
+            // (see issues #51794, #51758, #50867, #48866 and #44056).
+            //
+            // To work around the bug, we trick LLVM into not increasing
+            // the global's alignment by explicitly assigning a section to it
+            // (equivalent to automatically generating a `#[link_section]` attribute).
+            // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+            // of `lib/IR/Globals.cpp` for why this works.
+            //
+            // When the alignment is not increased, the optimized `mem::replace`
+            // will use load-unaligned instructions instead, thus avoiding the crash.
+            //
+            // We could remove this hack whenever we decide to drop macOS 10.10 support.
+            if self.tcx.sess.target.options.is_like_osx {
+                // The `inspect` method is okay here because we checked for provenance, and
+                // because we are doing this access to inspect the final interpreter state
+                // (not as part of the interpreter execution).
+                //
+                // FIXME: This check requires that the (arbitrary) value of undefined bytes
+                // happens to be zero. Instead, we should only check the value of defined bytes
+                // and set all undefined bytes to zero if this allocation is headed for the
+                // BSS.
+                unimplemented!();
+            }
+        }
+
+        // Wasm statics with custom link sections get special treatment as they
+        // go into custom sections of the wasm executable.
+        if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+            if let Some(_section) = attrs.link_section {
+                unimplemented!();
+            }
+        } else {
+            // TODO(antoyo): set link section.
+        }
+
+        if attrs.flags.contains(CodegenFnAttrFlags::USED) || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+            self.add_used_global(global.to_rvalue());
+        }
+    }
+
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+    fn add_used_global(&self, _global: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn add_compiler_used_global(&self, global: RValue<'gcc>) {
+        // NOTE: it seems like GCC does not make the distinction between compiler.used and used.
+        self.add_used_global(global);
+    }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    #[cfg_attr(not(feature="master"), allow(unused_variables))]
+    pub fn add_used_function(&self, function: Function<'gcc>) {
+        #[cfg(feature = "master")]
+        function.add_attribute(FnAttribute::Used);
+    }
+
+    pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+        let global =
+            match kind {
+                Some(kind) if !self.tcx.sess.fewer_names() => {
+                    let name = self.generate_local_symbol_name(kind);
+                    // TODO(antoyo): check if it's okay that no link_section is set.
+
+                    let typ = self.val_ty(cv).get_aligned(align.bytes());
+                    let global = self.declare_private_global(&name[..], typ);
+                    global
+                }
+                _ => {
+                    let typ = self.val_ty(cv).get_aligned(align.bytes());
+                    let global = self.declare_unnamed_global(typ);
+                    global
+                },
+            };
+        global.global_set_initializer_rvalue(cv);
+        // TODO(antoyo): set unnamed address.
+ let rvalue = global.get_address(None); + self.global_lvalues.borrow_mut().insert(rvalue, global); + rvalue + } + + pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> { + let instance = Instance::mono(self.tcx, def_id); + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + if let Some(&global) = self.instances.borrow().get(&instance) { + return global; + } + + let defined_in_current_codegen_unit = + self.codegen_unit.items().contains_key(&MonoItem::Static(def_id)); + assert!( + !defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics defined in the same CGU, but did not for `{:?}`", + def_id + ); + + let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); + let sym = self.tcx.symbol_name(instance).name; + + let global = + if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { + let llty = self.layout_of(ty).gcc_type(self); + if let Some(global) = self.get_declared_value(sym) { + if self.val_ty(global) != self.type_ptr_to(llty) { + span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); + } + } + + let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.declare_global( + &sym, + llty, + GlobalKind::Exported, + is_tls, + fn_attrs.link_section, + ); + + if !self.tcx.is_reachable_non_generic(def_id) { + // TODO(antoyo): set visibility. + } + + global + } else { + check_and_apply_linkage(&self, &fn_attrs, ty, sym) + }; + + if !def_id.is_local() { + let needs_dll_storage_attr = false; // TODO(antoyo) + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!( + !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() + && self.tcx.sess.target.options.is_like_msvc + && self.tcx.sess.opts.cg.prefer_dynamic) + ); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e., it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !self.tcx.is_codegened_item(def_id) { + unimplemented!(); + } + } + } + + // TODO(antoyo): set dll storage class. + + self.instances.borrow_mut().insert(instance, global); + global + } +} + +pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> { + let alloc = alloc.inner(); + let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); + let dl = cx.data_layout(); + let pointer_size = dl.pointer_size.bytes() as usize; + + let mut next_offset = 0; + for &(offset, alloc_id) in alloc.provenance().ptrs().iter() { + let offset = offset.bytes(); + assert_eq!(offset as usize as u64, offset); + let offset = offset as usize; + if offset > next_offset { + // This `inspect` is okay since we have checked that it is not within a pointer with provenance, it + // is within the bounds of the allocation, and it doesn't affect interpreter execution + // (we inspect the result after interpreter execution). Any undef byte is replaced with + // some arbitrary byte value. 
+            //
+            // FIXME: relay undef bytes to codegen as undef const bytes
+            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+            llvals.push(cx.const_bytes(bytes));
+        }
+        let ptr_offset =
+            read_target_uint(dl.endian,
+                // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+                // affect interpreter execution (we inspect the result after interpreter execution),
+                // and we properly interpret the provenance as a relocation pointer offset.
+                alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+            )
+            .expect("const_alloc_to_gcc: could not read relocation pointer")
+            as u64;
+
+        let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);
+
+        llvals.push(cx.scalar_to_backend(
+            InterpScalar::from_pointer(
+                interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+                &cx.tcx,
+            ),
+            abi::Scalar::Initialized { value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size) },
+            cx.type_i8p_ext(address_space),
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.len() >= next_offset {
+        let range = next_offset..alloc.len();
+        // This `inspect` is okay since we have checked that it is after all provenance, it is
+        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+        // inspect the result after interpreter execution). Any undef byte is replaced with some
+        // arbitrary byte value.
+        //
+        // FIXME: relay undef bytes to codegen as undef const bytes
+        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+        llvals.push(cx.const_bytes(bytes));
+    }
+
+    cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
+    let alloc = cx.tcx.eval_static_initializer(def_id)?;
+    Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str) -> LValue<'gcc> {
+    let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+    let gcc_type = cx.layout_of(ty).gcc_type(cx);
+    if let Some(linkage) = attrs.import_linkage {
+        // Declare a symbol `foo` with the desired linkage.
+        let global1 = cx.declare_global_with_linkage(&sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
+
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`. If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+        let mut real_name = "_rust_extern_with_linkage_".to_string();
+        real_name.push_str(&sym);
+        let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
+        // TODO(antoyo): set linkage.
+        let value = cx.const_ptrcast(global1.get_address(None), gcc_type);
+        global2.global_set_initializer_rvalue(value);
+        // TODO(antoyo): use global_set_initializer() when it will work.
+        global2
+    }
+    else {
+        // Generate an external declaration.
+        // FIXME(nagisa): investigate whether it can be changed into define_global
+
+        // Thread-local statics in some other crate need to *always* be linked
+        // against in a thread-local fashion, so we need to be sure to apply the
+        // thread-local attribute locally if it was present remotely.
If we
+        // don't do this then linker errors can be generated where the linker
+        // complains that one object file has a thread-local version of the
+        // symbol and another one doesn't.
+        cx.declare_global(&sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
new file mode 100644
index 00000000000..661681bdb50
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -0,0 +1,553 @@
+use std::cell::{Cell, RefCell};
+
+use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Type};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::{
+    BackendTypes,
+    BaseTypeMethods,
+    MiscMethods,
+};
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::span_bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
+use rustc_session::Session;
+use rustc_span::{Span, source_map::respan};
+use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
+
+use crate::callee::get_fn;
+
+#[derive(Clone)]
+pub struct FuncSig<'gcc> {
+    pub params: Vec<Type<'gcc>>,
+    pub return_type: Type<'gcc>,
+}
+
+pub struct CodegenCx<'gcc, 'tcx> {
+    pub check_overflow: bool,
+    pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+    pub context: &'gcc Context<'gcc>,
+
+    // TODO(bjorn3): Can this field be removed?
+    pub current_func: RefCell<Option<Function<'gcc>>>,
+    pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+    pub function_address_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
+
+    pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+    pub intrinsics: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+    pub tls_model: gccjit::TlsModel,
+
+    pub bool_type: Type<'gcc>,
+    pub i8_type: Type<'gcc>,
+    pub i16_type: Type<'gcc>,
+    pub i32_type: Type<'gcc>,
+    pub i64_type: Type<'gcc>,
+    pub i128_type: Type<'gcc>,
+    pub isize_type: Type<'gcc>,
+
+    pub u8_type: Type<'gcc>,
+    pub u16_type: Type<'gcc>,
+    pub u32_type: Type<'gcc>,
+    pub u64_type: Type<'gcc>,
+    pub u128_type: Type<'gcc>,
+    pub usize_type: Type<'gcc>,
+
+    pub char_type: Type<'gcc>,
+    pub uchar_type: Type<'gcc>,
+    pub short_type: Type<'gcc>,
+    pub ushort_type: Type<'gcc>,
+    pub int_type: Type<'gcc>,
+    pub uint_type: Type<'gcc>,
+    pub long_type: Type<'gcc>,
+    pub ulong_type: Type<'gcc>,
+    pub longlong_type: Type<'gcc>,
+    pub ulonglong_type: Type<'gcc>,
+    pub sizet_type: Type<'gcc>,
+
+    pub supports_128bit_integers: bool,
+
+    pub float_type: Type<'gcc>,
+    pub double_type: Type<'gcc>,
+
+    pub linkage: Cell<FunctionType>,
+    pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+    pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+    pub tcx: TyCtxt<'tcx>,
+
+    pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+    /// Cache instances of monomorphic and polymorphic items
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, LValue<'gcc>>>,
+    /// Cache function instances of monomorphic and polymorphic items
+    pub function_instances: RefCell<FxHashMap<Instance<'tcx>, Function<'gcc>>>,
+    /// Cache generated vtables
+    pub vtables: RefCell<FxHashMap<(Ty<'tcx>,
+
+    // TODO(antoyo): improve the SSA API to not require those.
+    /// Mapping from function pointer type to indexes of on-stack parameters.
+    pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
+    /// Mapping from function to indexes of on-stack parameters.
+    pub on_stack_function_params: RefCell<FxHashMap<Function<'gcc>, FxHashSet<usize>>>,
+
+    /// Cache of emitted const globals (value -> global)
+    pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+    /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
+    /// TODO(antoyo): remove when the rustc API is fixed.
+    pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
+
+    /// Cache of constant strings.
+    pub const_str_cache: RefCell<FxHashMap<String, LValue<'gcc>>>,
+
+    /// Cache of globals.
+    pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
+
+    /// A counter that is used for generating local symbol names.
+    local_gen_sym_counter: Cell<usize>,
+
+    eh_personality: Cell<Option<RValue<'gcc>>>,
+    pub rust_try_fn: Cell<Option<(Type<'gcc>, Function<'gcc>)>>,
+
+    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+    /// NOTE: a hack is used because the rustc API is not suitable for libgccjit and as such,
+    /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+    /// This set remembers which of these pointers were returned by that function so that
+    /// they can be dereferenced later.
+    /// FIXME(antoyo): fix the rustc API to avoid having this hack.
+    pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+
+    pub cleanup_blocks: RefCell<FxHashSet<Block<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
+        let check_overflow = tcx.sess.overflow_checks();
+
+        let i8_type = context.new_c_type(CType::Int8t);
+        let i16_type = context.new_c_type(CType::Int16t);
+        let i32_type = context.new_c_type(CType::Int32t);
+        let i64_type = context.new_c_type(CType::Int64t);
+        let u8_type = context.new_c_type(CType::UInt8t);
+        let u16_type = context.new_c_type(CType::UInt16t);
+        let u32_type = context.new_c_type(CType::UInt32t);
+        let u64_type = context.new_c_type(CType::UInt64t);
+
+        let (i128_type, u128_type) =
+            if supports_128bit_integers {
+                let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+                let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+                (i128_type, u128_type)
+            }
+            else {
+                let i128_type = context.new_array_type(None, i64_type, 2);
+                let u128_type = context.new_array_type(None, u64_type, 2);
+                (i128_type, u128_type)
+            };
+
+        let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+        let float_type = context.new_type::<f32>();
+        let double_type = context.new_type::<f64>();
+
+        let char_type = context.new_c_type(CType::Char);
+        let uchar_type = context.new_c_type(CType::UChar);
+        let short_type = context.new_c_type(CType::Short);
+        let ushort_type = context.new_c_type(CType::UShort);
+        let int_type = context.new_c_type(CType::Int);
+        let uint_type = context.new_c_type(CType::UInt);
+        let long_type = context.new_c_type(CType::Long);
+        let ulong_type = context.new_c_type(CType::ULong);
+        let longlong_type =
context.new_c_type(CType::LongLong); + let ulonglong_type = context.new_c_type(CType::ULongLong); + let sizet_type = context.new_c_type(CType::SizeT); + + let isize_type = context.new_c_type(CType::LongLong); + let usize_type = context.new_c_type(CType::ULongLong); + let bool_type = context.new_type::<bool>(); + + // TODO(antoyo): only have those assertions on x86_64. + assert_eq!(isize_type.get_size(), i64_type.get_size()); + assert_eq!(usize_type.get_size(), u64_type.get_size()); + + let mut functions = FxHashMap::default(); + let builtins = [ + "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow", + "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/ + "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow", + "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow", + "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos", + "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf", + "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf", + "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round", + "__builtin_expect_with_probability", + ]; + + for builtin in builtins.iter() { + functions.insert(builtin.to_string(), context.get_builtin_function(builtin)); + } + + Self { + check_overflow, + codegen_unit, + context, + current_func: RefCell::new(None), + normal_function_addresses: Default::default(), + function_address_names: Default::default(), + functions: RefCell::new(functions), + intrinsics: RefCell::new(FxHashMap::default()), + + tls_model, + + bool_type, + i8_type, + i16_type, + i32_type, + i64_type, + i128_type, + isize_type, + usize_type, + u8_type, + u16_type, + u32_type, + u64_type, + u128_type, + char_type, + uchar_type, + short_type, + ushort_type, + int_type, + uint_type, + long_type, + ulong_type, + longlong_type, + ulonglong_type, + sizet_type, + + supports_128bit_integers, + + float_type, + double_type, + + linkage: Cell::new(FunctionType::Internal), + instances: Default::default(), + function_instances: Default::default(), + on_stack_params: Default::default(), + on_stack_function_params: Default::default(), + vtables: Default::default(), + const_globals: Default::default(), + global_lvalues: Default::default(), + const_str_cache: Default::default(), + globals: Default::default(), + scalar_types: Default::default(), + types: Default::default(), + tcx, + struct_types: Default::default(), + local_gen_sym_counter: Cell::new(0), + eh_personality: Cell::new(None), + rust_try_fn: Cell::new(None), + pointee_infos: Default::default(), + structs_as_pointer: Default::default(), + cleanup_blocks: Default::default(), + } + } + + pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> { + let function: Function<'gcc> = unsafe { std::mem::transmute(value) }; + debug_assert!(self.functions.borrow().values().any(|value| *value == function), + "{:?} ({:?}) is not a function", value, value.get_type()); + function + } + + pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool { + let types = [ + self.u8_type, + self.u16_type, + self.u32_type, + self.u64_type, + self.i8_type, + self.i16_type, + self.i32_type, + self.i64_type, + ]; + + for native_type in 
types { + if native_type.is_compatible_with(typ) { + return true; + } + } + + self.supports_128bit_integers && + (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)) + } + + pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool { + !self.supports_128bit_integers && + (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ)) + } + + pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool { + self.is_native_int_type(typ) || typ.is_compatible_with(self.bool_type) + } + + pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool { + self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type) + } + + pub fn sess(&self) -> &'tcx Session { + &self.tcx.sess + } + + pub fn bitcast_if_needed(&self, value: RValue<'gcc>, expected_type: Type<'gcc>) -> RValue<'gcc> { + if value.get_type() != expected_type { + self.context.new_bitcast(None, value, expected_type) + } + else { + value + } + } +} + +impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> { + type Value = RValue<'gcc>; + type Function = RValue<'gcc>; + + type BasicBlock = Block<'gcc>; + type Type = Type<'gcc>; + type Funclet = (); // TODO(antoyo) + + type DIScope = (); // TODO(antoyo) + type DILocation = (); // TODO(antoyo) + type DIVariable = (); // TODO(antoyo) +} + +impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> { + &self.vtables + } + + fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> { + let func = get_fn(self, instance); + *self.current_func.borrow_mut() = Some(func); + // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. + unsafe { std::mem::transmute(func) } + } + + fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> { + let func_name = self.tcx.symbol_name(instance).name; + + let func = + if self.intrinsics.borrow().contains_key(func_name) { + self.intrinsics.borrow()[func_name].clone() + } + else { + get_fn(self, instance) + }; + let ptr = func.get_address(None); + + // TODO(antoyo): don't do this twice: i.e. in declare_fn and here. + // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI). + + self.normal_function_addresses.borrow_mut().insert(ptr); + self.function_address_names.borrow_mut().insert(ptr, func_name.to_string()); + + ptr + } + + fn eh_personality(&self) -> RValue<'gcc> { + // The exception handling personality function. + // + // If our compilation unit has the `eh_personality` lang item somewhere + // within it, then we just need to codegen that. Otherwise, we're + // building an rlib which will depend on some upstream implementation of + // this function, so we just codegen a generic reference to it. We don't + // specify any of the types for the function, we just make it a symbol + // that LLVM can later use. + // + // Note that MSVC is a little special here in that we don't use the + // `eh_personality` lang item at all. Currently LLVM has support for + // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the + // *name of the personality function* to decide what kind of unwind side + // tables/landing pads to emit. It looks like Dwarf is used by default, + // injecting a dependency on the `_Unwind_Resume` symbol for resuming + // an "exception", but for MSVC we want to force SEH. 
This means that we + // can't actually have the personality function be our standard + // `rust_eh_personality` function, but rather we wired it up to the + // CRT's custom personality function, which forces LLVM to consider + // landing pads as "landing pads for SEH". + if let Some(llpersonality) = self.eh_personality.get() { + return llpersonality; + } + let tcx = self.tcx; + let func = + match tcx.lang_items().eh_personality() { + Some(def_id) if !wants_msvc_seh(self.sess()) => { + let instance = + ty::Instance::resolve( + tcx, + ty::ParamEnv::reveal_all(), + def_id, + ty::List::empty(), + ) + .unwrap().unwrap(); + + let symbol_name = tcx.symbol_name(instance).name; + let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); + self.linkage.set(FunctionType::Extern); + let func = self.declare_fn(symbol_name, &fn_abi); + let func: RValue<'gcc> = unsafe { std::mem::transmute(func) }; + func + }, + _ => { + let name = + if wants_msvc_seh(self.sess()) { + "__CxxFrameHandler3" + } + else { + "rust_eh_personality" + }; + let func = self.declare_func(name, self.type_i32(), &[], true); + unsafe { std::mem::transmute(func) } + } + }; + // TODO(antoyo): apply target cpu attributes. + self.eh_personality.set(Some(func)); + func + } + + fn sess(&self) -> &Session { + &self.tcx.sess + } + + fn check_overflow(&self) -> bool { + self.check_overflow + } + + fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> { + self.codegen_unit + } + + fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) { + // TODO(antoyo) + } + + fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) { + // TODO(antoyo) + } + + fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { + let entry_name = self.sess().target.entry_name.as_ref(); + if self.get_declared_value(entry_name).is_none() { + Some(self.declare_entry_fn(entry_name, fn_type, ())) + } + else { + // If the symbol already exists, it is an error: for example, the user wrote + // #[no_mangle] extern "C" fn main(..) {..} + // instead of #[start] + None + } + } +} + +impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } +} + +impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> { + fn data_layout(&self) -> &TargetDataLayout { + &self.tcx.data_layout + } +} + +impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> { + fn target_spec(&self) -> &Target { + &self.tcx.sess.target + } +} + +impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { + type LayoutOfResult = TyAndLayout<'tcx>; + + #[inline] + fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { + if let LayoutError::SizeOverflow(_) = err { + self.sess().emit_fatal(respan(span, err)) + } else { + span_bug!(span, "failed to get layout for `{}`: {}", ty, err) + } + } +} + +impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { + type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>; + + #[inline] + fn handle_fn_abi_err( + &self, + err: FnAbiError<'tcx>, + span: Span, + fn_abi_request: FnAbiRequest<'tcx>, + ) -> ! 
{ + if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err { + self.sess().emit_fatal(respan(span, err)) + } else { + match fn_abi_request { + FnAbiRequest::OfFnPtr { sig, extra_args } => { + span_bug!( + span, + "`fn_abi_of_fn_ptr({}, {:?})` failed: {}", + sig, + extra_args, + err + ); + } + FnAbiRequest::OfInstance { instance, extra_args } => { + span_bug!( + span, + "`fn_abi_of_instance({}, {:?})` failed: {}", + instance, + extra_args, + err + ); + } + } + } + } +} + +impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> { + fn param_env(&self) -> ParamEnv<'tcx> { + ParamEnv::reveal_all() + } +} + +impl<'b, 'tcx> CodegenCx<'b, 'tcx> { + /// Generates a new symbol name with the given prefix. This symbol name must + /// only be used for definitions with `internal` or `private` linkage. + pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + let idx = self.local_gen_sym_counter.get(); + self.local_gen_sym_counter.set(idx + 1); + // Include a '.' character, so there can be no accidental conflicts with + // user defined names + let mut name = String::with_capacity(prefix.len() + 6); + name.push_str(prefix); + name.push_str("."); + base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); + name + } +} + +fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel { + match tls_model { + TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic, + TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic, + TlsModel::InitialExec => gccjit::TlsModel::InitialExec, + TlsModel::LocalExec => gccjit::TlsModel::LocalExec, + } +} diff --git a/compiler/rustc_codegen_gcc/src/coverageinfo.rs b/compiler/rustc_codegen_gcc/src/coverageinfo.rs new file mode 100644 index 00000000000..872fc2472e2 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/coverageinfo.rs @@ -0,0 +1,69 @@ +use gccjit::RValue; +use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods}; +use rustc_hir::def_id::DefId; +use rustc_middle::mir::coverage::{ + CodeRegion, + CounterValueReference, + ExpressionOperandId, + InjectedExpressionId, + Op, +}; +use rustc_middle::ty::Instance; + +use crate::builder::Builder; +use crate::context::CodegenCx; + +impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { + fn set_function_source_hash( + &mut self, + _instance: Instance<'tcx>, + _function_source_hash: u64, + ) -> bool { + unimplemented!(); + } + + fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool { + // TODO(antoyo) + false + } + + fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool { + // TODO(antoyo) + false + } + + fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool { + // TODO(antoyo) + false + } +} + +impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn coverageinfo_finalize(&self) { + // TODO(antoyo) + } + + fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> { + unimplemented!(); + } + + /// Functions with MIR-based coverage are normally codegenned _only_ if + /// called. LLVM coverage tools typically expect every function to be + /// defined (even if unused), with at least one call to LLVM intrinsic + /// `instrprof.increment`. + /// + /// Codegen a small function that will never be called, with one counter + /// that will never be incremented. 
+ /// + /// For used/called functions, the coverageinfo was already added to the + /// `function_coverage_map` (keyed by function `Instance`) during codegen. + /// But in this case, since the unused function was _not_ previously + /// codegenned, collect the coverage `CodeRegion`s from the MIR and add + /// them. The first `CodeRegion` is used to add a single counter, with the + /// same counter ID used in the injected `instrprof.increment` intrinsic + /// call. Since the function is never called, all other `CodeRegion`s can be + /// added as `unreachable_region`s. + fn define_unused_fn(&self, _def_id: DefId) { + unimplemented!(); + } +} diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs new file mode 100644 index 00000000000..a81585d4128 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs @@ -0,0 +1,103 @@ +use gccjit::RValue; +use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind}; +use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods}; +use rustc_middle::mir; +use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty}; +use rustc_span::{SourceFile, Span, Symbol}; +use rustc_target::abi::call::FnAbi; +use rustc_target::abi::Size; +use std::ops::Range; + +use crate::builder::Builder; +use crate::context::CodegenCx; + +impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { + // FIXME(eddyb) find a common convention for all of the debuginfo-related + // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.). + fn dbg_var_addr( + &mut self, + _dbg_var: Self::DIVariable, + _scope_metadata: Self::DIScope, + _variable_alloca: Self::Value, + _direct_offset: Size, + _indirect_offsets: &[Size], + _fragment: Option<Range<Size>>, + ) { + unimplemented!(); + } + + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { + // TODO(antoyo): insert reference to gdb debug scripts section global. 
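+        // For comparison, the LLVM backend emits a reference to a global
+        // placed in the `.debug_gdb_scripts` section so that gdb auto-loads
+        // the Rust pretty-printers; something equivalent would presumably be
+        // needed here once debug info is supported.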
+ } + + fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) { + unimplemented!(); + } + + fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) { + unimplemented!(); + } +} + +impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn create_vtable_debuginfo( + &self, + _ty: Ty<'tcx>, + _trait_ref: Option<PolyExistentialTraitRef<'tcx>>, + _vtable: Self::Value, + ) { + // TODO(antoyo) + } + + fn create_function_debug_context( + &self, + _instance: Instance<'tcx>, + _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _llfn: RValue<'gcc>, + _mir: &mir::Body<'tcx>, + ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> { + // TODO(antoyo) + None + } + + fn extend_scope_to_file( + &self, + _scope_metadata: Self::DIScope, + _file: &SourceFile, + ) -> Self::DIScope { + unimplemented!(); + } + + fn debuginfo_finalize(&self) { + // TODO(antoyo) + } + + fn create_dbg_var( + &self, + _variable_name: Symbol, + _variable_type: Ty<'tcx>, + _scope_metadata: Self::DIScope, + _variable_kind: VariableKind, + _span: Span, + ) -> Self::DIVariable { + unimplemented!(); + } + + fn dbg_scope_fn( + &self, + _instance: Instance<'tcx>, + _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _maybe_definition_llfn: Option<RValue<'gcc>>, + ) -> Self::DIScope { + unimplemented!(); + } + + fn dbg_loc( + &self, + _scope: Self::DIScope, + _inlined_at: Option<Self::DILocation>, + _span: Span, + ) -> Self::DILocation { + unimplemented!(); + } +} diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs new file mode 100644 index 00000000000..4748e7e4be2 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/declare.rs @@ -0,0 +1,142 @@ +use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type}; +use rustc_codegen_ssa::traits::BaseTypeMethods; +use rustc_middle::ty::Ty; +use rustc_span::Symbol; +use rustc_target::abi::call::FnAbi; + +use crate::abi::FnAbiGccExt; +use crate::context::CodegenCx; +use crate::intrinsic::llvm; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> { + if self.globals.borrow().contains_key(name) { + let typ = self.globals.borrow()[name].get_type(); + let global = self.context.new_global(None, GlobalKind::Imported, typ, name); + if is_tls { + global.set_tls_model(self.tls_model); + } + if let Some(link_section) = link_section { + global.set_link_section(link_section.as_str()); + } + global + } + else { + self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section) + } + } + + pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> { + let name = self.generate_local_symbol_name("global"); + self.context.new_global(None, GlobalKind::Internal, ty, &name) + } + + pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> { + let global = self.context.new_global(None, linkage, ty, name); + let global_address = global.get_address(None); + self.globals.borrow_mut().insert(name.to_string(), global_address); + global + } + + pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> Function<'gcc> { + self.linkage.set(FunctionType::Extern); + declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic) + } + + pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> { + let global = self.context.new_global(None, 
global_kind, ty, name); + if is_tls { + global.set_tls_model(self.tls_model); + } + if let Some(link_section) = link_section { + global.set_link_section(link_section.as_str()); + } + let global_address = global.get_address(None); + self.globals.borrow_mut().insert(name.to_string(), global_address); + global + } + + pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> { + let global = self.context.new_global(None, GlobalKind::Internal, ty, name); + let global_address = global.get_address(None); + self.globals.borrow_mut().insert(name.to_string(), global_address); + global + } + + pub fn declare_entry_fn(&self, name: &str, _fn_type: Type<'gcc>, callconv: () /*llvm::CCallConv*/) -> RValue<'gcc> { + // TODO(antoyo): use the fn_type parameter. + let const_string = self.context.new_type::<u8>().make_pointer().make_pointer(); + let return_type = self.type_i32(); + let variadic = false; + self.linkage.set(FunctionType::Exported); + let func = declare_raw_fn(self, name, callconv, return_type, &[self.type_i32(), const_string], variadic); + // NOTE: it is needed to set the current_func here as well, because get_fn() is not called + // for the main function. + *self.current_func.borrow_mut() = Some(func); + // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. + unsafe { std::mem::transmute(func) } + } + + pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Function<'gcc> { + let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self); + let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, ¶ms, variadic); + self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices); + func + } + + pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> { + self.get_or_insert_global(name, ty, is_tls, link_section) + } + + pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> { + // TODO(antoyo): use a different field than globals, because this seems to return a function? + self.globals.borrow().get(name).cloned() + } +} + +/// Declare a function. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing Value instead. +fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> { + if name.starts_with("llvm.") { + let intrinsic = llvm::intrinsic(name, cx); + cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic); + return intrinsic; + } + let func = + if cx.functions.borrow().contains_key(name) { + cx.functions.borrow()[name] + } + else { + let params: Vec<_> = param_types.into_iter().enumerate() + .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name. + .collect(); + let func = cx.context.new_function(None, cx.linkage.get(), return_type, ¶ms, mangle_name(name), variadic); + cx.functions.borrow_mut().insert(name.to_string(), func); + func + }; + + // TODO(antoyo): set function calling convention. + // TODO(antoyo): set unnamed address. + // TODO(antoyo): set no red zone function attribute. + // TODO(antoyo): set attributes for optimisation. + // TODO(antoyo): set attributes for non lazy bind. + + // FIXME(antoyo): invalid cast. + func +} + +// FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _. 
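+// In practice only two such characters show up in Rust symbol names, as the
+// `debug_assert!` in `mangle_name` below enforces: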
+// Unsupported characters: `$` and `.`. +pub fn mangle_name(name: &str) -> String { + name.replace(|char: char| { + if !char.is_alphanumeric() && char != '_' { + debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char); + true + } + else { + false + } + }, "_") +} diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs new file mode 100644 index 00000000000..5ea39606c08 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/errors.rs @@ -0,0 +1,229 @@ +use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg}; +use rustc_macros::Diagnostic; +use rustc_middle::ty::Ty; +use rustc_span::{Span, Symbol}; +use std::borrow::Cow; + +struct ExitCode(Option<i32>); + +impl IntoDiagnosticArg for ExitCode { + fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> { + let ExitCode(exit_code) = self; + match exit_code { + Some(t) => t.into_diagnostic_arg(), + None => DiagnosticArgValue::Str(Cow::Borrowed("<signal>")), + } + } +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_basic_integer, code = "E0511")] +pub(crate) struct InvalidMonomorphizationBasicInteger<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_invalid_float_vector, code = "E0511")] +pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub elem_ty: &'a str, + pub vec_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_not_float, code = "E0511")] +pub(crate) struct InvalidMonomorphizationNotFloat<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_unrecognized, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnrecognized { + #[primary_span] + pub span: Span, + pub name: Symbol, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_expected_signed_unsigned, code = "E0511")] +pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub elem_ty: Ty<'a>, + pub vec_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_unsupported_element, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_ty: Ty<'a>, + pub elem_ty: Ty<'a>, + pub ret_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_invalid_bitmask, code = "E0511")] +pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, + pub expected_int_bits: u64, + pub expected_bytes: u64, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_simd_shuffle, code = "E0511")] +pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_expected_simd, code = "E0511")] +pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub position: &'a str, + pub found_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_mask_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationMaskType<'a> { + #[primary_span] + pub span: Span, 
+ pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_return_length, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnLength<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_len: u64, + pub ret_ty: Ty<'a>, + pub out_len: u64, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_return_length_input_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_len: u64, + pub in_ty: Ty<'a>, + pub ret_ty: Ty<'a>, + pub out_len: u64, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_return_element, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnElement<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_elem: Ty<'a>, + pub in_ty: Ty<'a>, + pub ret_ty: Ty<'a>, + pub out_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_return_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_elem: Ty<'a>, + pub in_ty: Ty<'a>, + pub ret_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_inserted_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationInsertedType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_elem: Ty<'a>, + pub in_ty: Ty<'a>, + pub out_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_return_integer_type, code = "E0511")] +pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ret_ty: Ty<'a>, + pub out_ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_mismatched_lengths, code = "E0511")] +pub(crate) struct InvalidMonomorphizationMismatchedLengths { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub m_len: u64, + pub v_len: u64, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_unsupported_cast, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_ty: Ty<'a>, + pub in_elem: Ty<'a>, + pub ret_ty: Ty<'a>, + pub out_elem: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_monomorphization_unsupported_operation, code = "E0511")] +pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub in_ty: Ty<'a>, + pub in_elem: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_lto_not_supported)] +pub(crate) struct LTONotSupported; + +#[derive(Diagnostic)] +#[diag(codegen_gcc_unwinding_inline_asm)] +pub(crate) struct UnwindingInlineAsm { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_gcc_invalid_minimum_alignment)] +pub(crate) struct InvalidMinimumAlignment { + pub err: String, +} diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs new file mode 100644 index 00000000000..0cf1204791d --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/int.rs @@ -0,0 +1,746 @@ +//! Module to handle integer operations. +//! This module exists because some integer types are not supported on some gcc platforms, e.g. +//! 128-bit integers on 32-bit platforms and thus require to be handled manually. 
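+//!
+//! On such platforms, a 128-bit value is modelled as a two-element array of
+//! the native 64-bit integer type (see `low`, `high` and `from_low_high`
+//! below); operations either recombine the two halves inline or call the
+//! usual libgcc/compiler-builtins helpers such as `__modti3`, `__udivti3`
+//! and `__ucmpti2`.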
+ +use std::convert::TryFrom; + +use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp}; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; +use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp}; +use rustc_middle::ty::Ty; + +use crate::builder::ToGccComp; +use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx}; + +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { + pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // 128-bit unsigned %: __umodti3 + self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b) + } + + pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // 128-bit signed %: __modti3 + self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b) + } + + pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> { + let typ = a.get_type(); + if self.is_native_int_type_or_bool(typ) { + let operation = + if typ.is_bool() { + UnaryOp::LogicalNegate + } + else { + UnaryOp::BitwiseNegate + }; + self.cx.context.new_unary_op(None, operation, typ, a) + } + else { + // TODO(antoyo): use __negdi2 and __negti2 instead? + let element_type = typ.dyncast_array().expect("element type"); + let values = [ + self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)), + self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)), + ]; + self.cx.context.new_array_constructor(None, typ, &values) + } + } + + pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + if self.is_native_int_type(a_type) { + self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a) + } + else { + let param_a = self.context.new_parameter(None, a_type, "a"); + let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false); + self.context.new_call(None, func, &[a]) + } + } + + pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b) + } + + pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + let b_type = b.get_type(); + let a_native = self.is_native_int_type(a_type); + let b_native = self.is_native_int_type(b_type); + if a_native && b_native { + // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number. + // TODO(antoyo): cast to unsigned to do a logical shift if that does not work. + if a_type.is_signed(self) != b_type.is_signed(self) { + let b = self.context.new_cast(None, b, a_type); + a >> b + } + else { + a >> b + } + } + else if a_native && !b_native { + self.gcc_lshr(a, self.gcc_int_cast(b, a_type)) + } + else { + // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most + // significant half of the number) which uses lshr. 
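+            //
+            // Instead, the shift is open-coded as a three-way branch on the
+            // shift amount `b`: `b & 64 != 0` (i.e. `b >= 64` for shift amounts
+            // below 128) moves the high half down into the low half, `b == 0`
+            // returns the value unchanged, and the remaining case splits the
+            // shift across both halves.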
+ + let native_int_type = a_type.dyncast_array().expect("get element type"); + + let func = self.current_func(); + let then_block = func.new_block("then"); + let else_block = func.new_block("else"); + let after_block = func.new_block("after"); + let b0_block = func.new_block("b0"); + let actual_else_block = func.new_block("actual_else"); + + let result = func.new_local(None, a_type, "shiftResult"); + + let sixty_four = self.gcc_int(native_int_type, 64); + let sixty_three = self.gcc_int(native_int_type, 63); + let zero = self.gcc_zero(native_int_type); + let b = self.gcc_int_cast(b, native_int_type); + let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero); + self.llbb().end_with_conditional(None, condition, then_block, else_block); + + // TODO(antoyo): take endianness into account. + let shift_value = self.gcc_sub(b, sixty_four); + let high = self.high(a); + let sign = + if a_type.is_signed(self) { + high >> sixty_three + } + else { + zero + }; + let values = [ + high >> shift_value, + sign, + ]; + let array_value = self.context.new_array_constructor(None, a_type, &values); + then_block.add_assignment(None, result, array_value); + then_block.end_with_jump(None, after_block); + + let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero); + else_block.end_with_conditional(None, condition, b0_block, actual_else_block); + + b0_block.add_assignment(None, result, a); + b0_block.end_with_jump(None, after_block); + + let shift_value = self.gcc_sub(sixty_four, b); + // NOTE: cast low to its unsigned type in order to perform a logical right shift. + let unsigned_type = native_int_type.to_unsigned(&self.cx); + let casted_low = self.context.new_cast(None, self.low(a), unsigned_type); + let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type); + let shifted_low = self.context.new_cast(None, shifted_low, native_int_type); + let values = [ + (high << shift_value) | shifted_low, + high >> b, + ]; + let array_value = self.context.new_array_constructor(None, a_type, &values); + actual_else_block.add_assignment(None, result, array_value); + actual_else_block.end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place rustc does not expect, the current block in the + // state need to be updated. + self.switch_to_block(after_block); + + result.to_rvalue() + } + } + + fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + let b_type = b.get_type(); + if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { + if a_type != b_type { + if a_type.is_vector() { + // Vector types need to be bitcast. + // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. 
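+                    // (Both operands are presumably same-sized vectors here,
+                    // so a bitcast rather than a value-converting cast is
+                    // used.)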
+ b = self.context.new_bitcast(None, b, a.get_type()); + } + else { + b = self.context.new_cast(None, b, a.get_type()); + } + } + self.context.new_binary_op(None, operation, a_type, a, b) + } + else { + let signed = a_type.is_compatible_with(self.i128_type); + let func_name = + match (operation, signed) { + (BinaryOp::Plus, true) => "__rust_i128_add", + (BinaryOp::Plus, false) => "__rust_u128_add", + (BinaryOp::Minus, true) => "__rust_i128_sub", + (BinaryOp::Minus, false) => "__rust_u128_sub", + _ => unreachable!("unexpected additive operation {:?}", operation), + }; + let param_a = self.context.new_parameter(None, a_type, "a"); + let param_b = self.context.new_parameter(None, b_type, "b"); + let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false); + self.context.new_call(None, func, &[a, b]) + } + } + + pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.additive_operation(BinaryOp::Plus, a, b) + } + + pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b) + } + + pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.additive_operation(BinaryOp::Minus, a, b) + } + + fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + let b_type = b.get_type(); + if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { + self.context.new_binary_op(None, operation, a_type, a, b) + } + else { + let sign = + if signed { + "" + } + else { + "u" + }; + let func_name = format!("__{}{}ti3", sign, operation_name); + let param_a = self.context.new_parameter(None, a_type, "a"); + let param_b = self.context.new_parameter(None, b_type, "b"); + let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false); + self.context.new_call(None, func, &[a, b]) + } + } + + pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): check if the types are signed? + // 128-bit, signed: __divti3 + // TODO(antoyo): convert the arguments to signed? + self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b) + } + + pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // 128-bit, unsigned: __udivti3 + self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b) + } + + pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) { + use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*}; + + let new_kind = + match typ.kind() { + Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), + Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)), + t @ (Uint(_) | Int(_)) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), + }; + + // TODO(antoyo): remove duplication with intrinsic? 
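+        // Pick the GCC overflow builtin matching the operation and the
+        // (pointer-width-normalized) integer kind: 32- and 64-bit operands use
+        // the typed signed/unsigned variants, while the remaining widths fall
+        // back to the generic, type-inferred `__builtin_{add,sub,mul}_overflow`.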
+ let name = + if self.is_native_int_type(lhs.get_type()) { + match oop { + OverflowOp::Add => + match new_kind { + Int(I8) => "__builtin_add_overflow", + Int(I16) => "__builtin_add_overflow", + Int(I32) => "__builtin_sadd_overflow", + Int(I64) => "__builtin_saddll_overflow", + Int(I128) => "__builtin_add_overflow", + + Uint(U8) => "__builtin_add_overflow", + Uint(U16) => "__builtin_add_overflow", + Uint(U32) => "__builtin_uadd_overflow", + Uint(U64) => "__builtin_uaddll_overflow", + Uint(U128) => "__builtin_add_overflow", + + _ => unreachable!(), + }, + OverflowOp::Sub => + match new_kind { + Int(I8) => "__builtin_sub_overflow", + Int(I16) => "__builtin_sub_overflow", + Int(I32) => "__builtin_ssub_overflow", + Int(I64) => "__builtin_ssubll_overflow", + Int(I128) => "__builtin_sub_overflow", + + Uint(U8) => "__builtin_sub_overflow", + Uint(U16) => "__builtin_sub_overflow", + Uint(U32) => "__builtin_usub_overflow", + Uint(U64) => "__builtin_usubll_overflow", + Uint(U128) => "__builtin_sub_overflow", + + _ => unreachable!(), + }, + OverflowOp::Mul => + match new_kind { + Int(I8) => "__builtin_mul_overflow", + Int(I16) => "__builtin_mul_overflow", + Int(I32) => "__builtin_smul_overflow", + Int(I64) => "__builtin_smulll_overflow", + Int(I128) => "__builtin_mul_overflow", + + Uint(U8) => "__builtin_mul_overflow", + Uint(U16) => "__builtin_mul_overflow", + Uint(U32) => "__builtin_umul_overflow", + Uint(U64) => "__builtin_umulll_overflow", + Uint(U128) => "__builtin_mul_overflow", + + _ => unreachable!(), + }, + } + } + else { + match new_kind { + Int(I128) | Uint(U128) => { + let func_name = + match oop { + OverflowOp::Add => + match new_kind { + Int(I128) => "__rust_i128_addo", + Uint(U128) => "__rust_u128_addo", + _ => unreachable!(), + }, + OverflowOp::Sub => + match new_kind { + Int(I128) => "__rust_i128_subo", + Uint(U128) => "__rust_u128_subo", + _ => unreachable!(), + }, + OverflowOp::Mul => + match new_kind { + Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead? + Uint(U128) => "__rust_u128_mulo", + _ => unreachable!(), + }, + }; + let a_type = lhs.get_type(); + let b_type = rhs.get_type(); + let param_a = self.context.new_parameter(None, a_type, "a"); + let param_b = self.context.new_parameter(None, b_type, "b"); + let result_field = self.context.new_field(None, a_type, "result"); + let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); + let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]); + let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false); + let result = self.context.new_call(None, func, &[lhs, rhs]); + let overflow = result.access_field(None, overflow_field); + let int_result = result.access_field(None, result_field); + return (int_result, overflow); + }, + _ => { + match oop { + OverflowOp::Mul => + match new_kind { + Int(I32) => "__mulosi4", + Int(I64) => "__mulodi4", + _ => unreachable!(), + }, + _ => unimplemented!("overflow operation for {:?}", new_kind), + } + } + } + }; + + let intrinsic = self.context.get_builtin_function(&name); + let res = self.current_func() + // TODO(antoyo): is it correct to use rhs type instead of the parameter typ? 
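+            // The GCC overflow builtins write the wrapped result through their
+            // third (pointer) argument and return a boolean overflow flag,
+            // hence the local whose address is passed below.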
+ .new_local(None, rhs.get_type(), "binopResult") + .get_address(None); + let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None); + (res.dereference(None).to_rvalue(), overflow) + } + + pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> { + let a_type = lhs.get_type(); + let b_type = rhs.get_type(); + if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) { + let signed = a_type.is_compatible_with(self.i128_type); + let sign = + if signed { + "" + } + else { + "u" + }; + let func_name = format!("__{}cmpti2", sign); + let param_a = self.context.new_parameter(None, a_type, "a"); + let param_b = self.context.new_parameter(None, b_type, "b"); + let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false); + let cmp = self.context.new_call(None, func, &[lhs, rhs]); + let (op, limit) = + match op { + IntPredicate::IntEQ => { + return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type)); + }, + IntPredicate::IntNE => { + return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type)); + }, + IntPredicate::IntUGT => (ComparisonOp::Equals, 2), + IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1), + IntPredicate::IntULT => (ComparisonOp::Equals, 0), + IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1), + IntPredicate::IntSGT => (ComparisonOp::Equals, 2), + IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1), + IntPredicate::IntSLT => (ComparisonOp::Equals, 0), + IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1), + }; + self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit)) + } + else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() { + // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize. + lhs = self.context.new_bitcast(None, lhs, self.usize_type); + rhs = self.context.new_bitcast(None, rhs, self.usize_type); + self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + } + else { + if a_type != b_type { + // NOTE: because libgccjit cannot compare function pointers. + if a_type.dyncast_function_ptr_type().is_some() && b_type.dyncast_function_ptr_type().is_some() { + lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer()); + rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer()); + } + // NOTE: hack because we try to cast a vector type to the same vector type. 
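+                // Comparing the `Debug` output filters out the case where both
+                // sides are already the same vector type, for which libgccjit
+                // would reject the cast.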
+ else if format!("{:?}", a_type) != format!("{:?}", b_type) { + rhs = self.context.new_cast(None, rhs, a_type); + } + } + self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + } + } + + pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + let b_type = b.get_type(); + if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) { + a ^ b + } + else { + let values = [ + self.low(a) ^ self.low(b), + self.high(a) ^ self.high(b), + ]; + self.context.new_array_constructor(None, a_type, &values) + } + } + + pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + let b_type = b.get_type(); + let a_native = self.is_native_int_type(a_type); + let b_native = self.is_native_int_type(b_type); + if a_native && b_native { + // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number. + if a_type.is_unsigned(self) && b_type.is_signed(self) { + let a = self.context.new_cast(None, a, b_type); + let result = a << b; + self.context.new_cast(None, result, a_type) + } + else if a_type.is_signed(self) && b_type.is_unsigned(self) { + let b = self.context.new_cast(None, b, a_type); + a << b + } + else { + a << b + } + } + else if a_native && !b_native { + self.gcc_shl(a, self.gcc_int_cast(b, a_type)) + } + else { + // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl. + let native_int_type = a_type.dyncast_array().expect("get element type"); + + let func = self.current_func(); + let then_block = func.new_block("then"); + let else_block = func.new_block("else"); + let after_block = func.new_block("after"); + let b0_block = func.new_block("b0"); + let actual_else_block = func.new_block("actual_else"); + + let result = func.new_local(None, a_type, "shiftResult"); + + let b = self.gcc_int_cast(b, native_int_type); + let sixty_four = self.gcc_int(native_int_type, 64); + let zero = self.gcc_zero(native_int_type); + let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero); + self.llbb().end_with_conditional(None, condition, then_block, else_block); + + // TODO(antoyo): take endianness into account. + let values = [ + zero, + self.low(a) << (b - sixty_four), + ]; + let array_value = self.context.new_array_constructor(None, a_type, &values); + then_block.add_assignment(None, result, array_value); + then_block.end_with_jump(None, after_block); + + let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero); + else_block.end_with_conditional(None, condition, b0_block, actual_else_block); + + b0_block.add_assignment(None, result, a); + b0_block.end_with_jump(None, after_block); + + // NOTE: cast low to its unsigned type in order to perform a logical right shift. + let unsigned_type = native_int_type.to_unsigned(&self.cx); + let casted_low = self.context.new_cast(None, self.low(a), unsigned_type); + let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type); + let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type); + let values = [ + self.low(a) << b, + (self.high(a) << b) | high_low, + ]; + + let array_value = self.context.new_array_constructor(None, a_type, &values); + actual_else_block.add_assignment(None, result, array_value); + actual_else_block.end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place rustc does not expect, the current block in the + // state need to be updated. 
+ self.switch_to_block(after_block); + + result.to_rvalue() + } + } + + pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> { + let arg_type = arg.get_type(); + if !self.is_native_int_type(arg_type) { + let native_int_type = arg_type.dyncast_array().expect("get element type"); + let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue(); + let swapped_lsb = self.gcc_bswap(lsb, width / 2); + let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type); + let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue(); + let swapped_msb = self.gcc_bswap(msb, width / 2); + let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type); + + // NOTE: we also need to swap the two elements here, in addition to swapping inside + // the elements themselves like done above. + return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]); + } + + // TODO(antoyo): check if it's faster to use string literals and a + // match instead of format!. + let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width)); + // FIXME(antoyo): this cast should not be necessary. Remove + // when having proper sized integer types. + let param_type = bswap.get_param(0).to_rvalue().get_type(); + if param_type != arg_type { + arg = self.bitcast(arg, param_type); + } + self.cx.context.new_call(None, bswap, &[arg]) + } +} + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> { + if self.is_native_int_type_or_bool(typ) { + self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from")) + } + else { + // NOTE: set the sign in high. + self.from_low_high(typ, int, -(int.is_negative() as i64)) + } + } + + pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> { + if self.is_native_int_type_or_bool(typ) { + self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64) + } + else { + self.from_low_high(typ, int as i64, 0) + } + } + + pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { + let low = num as u64; + let high = (num >> 64) as u64; + if num >> 64 != 0 { + // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()? + if self.is_native_int_type(typ) { + let low = self.context.new_rvalue_from_long(self.u64_type, low as i64); + let high = self.context.new_rvalue_from_long(typ, high as i64); + + let sixty_four = self.context.new_rvalue_from_long(typ, 64); + let shift = high << sixty_four; + shift | self.context.new_cast(None, low, typ) + } + else { + self.from_low_high(typ, low as i64, high as i64) + } + } + else if typ.is_i128(self) { + let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64); + self.gcc_int_cast(num, typ) + } + else { + self.gcc_uint(typ, num as u64) + } + } + + pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> { + if self.is_native_int_type_or_bool(typ) { + self.context.new_rvalue_zero(typ) + } + else { + self.from_low_high(typ, 0, 0) + } + } + + pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 { + if self.is_native_int_type_or_bool(typ) { + typ.get_size() as u64 * 8 + } + else { + // NOTE: the only unsupported types are u128 and i128. 
+ 128 + } + } + + fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { + let a_type = a.get_type(); + let b_type = b.get_type(); + let a_native = self.is_native_int_type_or_bool(a_type); + let b_native = self.is_native_int_type_or_bool(b_type); + if a_type.is_vector() && b_type.is_vector() { + self.context.new_binary_op(None, operation, a_type, a, b) + } + else if a_native && b_native { + if a_type != b_type { + b = self.context.new_cast(None, b, a_type); + } + self.context.new_binary_op(None, operation, a_type, a, b) + } + else { + assert!(!a_native && !b_native, "both types should either be native or non-native for or operation"); + let native_int_type = a_type.dyncast_array().expect("get element type"); + let values = [ + self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)), + self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)), + ]; + self.context.new_array_constructor(None, a_type, &values) + } + } + + pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.bitwise_operation(BinaryOp::BitwiseOr, a, b) + } + + // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead? + pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { + let value_type = value.get_type(); + if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) { + self.context.new_cast(None, value, dest_typ) + } + else if self.is_native_int_type_or_bool(dest_typ) { + self.context.new_cast(None, self.low(value), dest_typ) + } + else if self.is_native_int_type_or_bool(value_type) { + let dest_element_type = dest_typ.dyncast_array().expect("get element type"); + + // NOTE: set the sign of the value. + let zero = self.context.new_rvalue_zero(value_type); + let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero); + let is_negative = self.gcc_int_cast(is_negative, dest_element_type); + let values = [ + self.context.new_cast(None, value, dest_element_type), + self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative), + ]; + self.context.new_array_constructor(None, dest_typ, &values) + } + else { + // Since u128 and i128 are the only types that can be unsupported, we know the type of + // value and the destination type have the same size, so a bitcast is fine. + + // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. 
+            self.context.new_bitcast(None, value, dest_typ)
+        }
+    }
+
+    fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        let value_type = value.get_type();
+        if self.is_native_int_type_or_bool(value_type) {
+            return self.context.new_cast(None, value, dest_typ);
+        }
+
+        let name_suffix =
+            match self.type_kind(dest_typ) {
+                TypeKind::Float => "tisf",
+                TypeKind::Double => "tidf",
+                kind => panic!("cannot cast a non-native integer to type {:?}", kind),
+            };
+        let sign =
+            if signed {
+                ""
+            }
+            else {
+                "un"
+            };
+        let func_name = format!("__float{}{}", sign, name_suffix);
+        let param = self.context.new_parameter(None, value_type, "n");
+        let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+        self.context.new_call(None, func, &[value])
+    }
+
+    pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        self.int_to_float_cast(true, value, dest_typ)
+    }
+
+    pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        self.int_to_float_cast(false, value, dest_typ)
+    }
+
+    fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        let value_type = value.get_type();
+        if self.is_native_int_type_or_bool(dest_typ) {
+            return self.context.new_cast(None, value, dest_typ);
+        }
+
+        let name_suffix =
+            match self.type_kind(value_type) {
+                TypeKind::Float => "sfti",
+                TypeKind::Double => "dfti",
+                kind => panic!("cannot cast a {:?} to a non-native integer", kind),
+            };
+        let sign =
+            if signed {
+                ""
+            }
+            else {
+                "uns"
+            };
+        let func_name = format!("__fix{}{}", sign, name_suffix);
+        let param = self.context.new_parameter(None, value_type, "n");
+        let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+        self.context.new_call(None, func, &[value])
+    }
+
+    pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        self.float_to_int_cast(true, value, dest_typ)
+    }
+
+    pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        self.float_to_int_cast(false, value, dest_typ)
+    }
+
+    fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+        self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
+            .to_rvalue()
+    }
+
+    fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+        self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
+            .to_rvalue()
+    }
+
+    fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
+        let native_int_type = typ.dyncast_array().expect("get element type");
+        let values = [
+            self.context.new_rvalue_from_long(native_int_type, low),
+            self.context.new_rvalue_from_long(native_int_type, high),
+        ];
+        self.context.new_array_constructor(None, typ, &values)
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
new file mode 100644
index 00000000000..8a4559355ea
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -0,0 +1,8037 @@
+// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`
+// DO NOT EDIT IT!
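+// Maps the name of an LLVM intrinsic (`name`) to the name of the equivalent
+// GCC builtin. The resulting string is presumably resolved with
+// `get_builtin_function`, as the `__builtin_bswap*` lookup above does.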
+match name { + // AMDGPU + "llvm.AMDGPU.div.fixup.f32" => "__builtin_amdgpu_div_fixup", + "llvm.AMDGPU.div.fixup.f64" => "__builtin_amdgpu_div_fixup", + "llvm.AMDGPU.div.fixup.v2f64" => "__builtin_amdgpu_div_fixup", + "llvm.AMDGPU.div.fixup.v4f32" => "__builtin_amdgpu_div_fixup", + "llvm.AMDGPU.div.fmas.f32" => "__builtin_amdgpu_div_fmas", + "llvm.AMDGPU.div.fmas.f64" => "__builtin_amdgpu_div_fmas", + "llvm.AMDGPU.div.fmas.v2f64" => "__builtin_amdgpu_div_fmas", + "llvm.AMDGPU.div.fmas.v4f32" => "__builtin_amdgpu_div_fmas", + "llvm.AMDGPU.ldexp.f32" => "__builtin_amdgpu_ldexp", + "llvm.AMDGPU.ldexp.f64" => "__builtin_amdgpu_ldexp", + "llvm.AMDGPU.ldexp.v2f64" => "__builtin_amdgpu_ldexp", + "llvm.AMDGPU.ldexp.v4f32" => "__builtin_amdgpu_ldexp", + "llvm.AMDGPU.rcp.f32" => "__builtin_amdgpu_rcp", + "llvm.AMDGPU.rcp.f64" => "__builtin_amdgpu_rcp", + "llvm.AMDGPU.rcp.v2f64" => "__builtin_amdgpu_rcp", + "llvm.AMDGPU.rcp.v4f32" => "__builtin_amdgpu_rcp", + "llvm.AMDGPU.rsq.clamped.f32" => "__builtin_amdgpu_rsq_clamped", + "llvm.AMDGPU.rsq.clamped.f64" => "__builtin_amdgpu_rsq_clamped", + "llvm.AMDGPU.rsq.clamped.v2f64" => "__builtin_amdgpu_rsq_clamped", + "llvm.AMDGPU.rsq.clamped.v4f32" => "__builtin_amdgpu_rsq_clamped", + "llvm.AMDGPU.rsq.f32" => "__builtin_amdgpu_rsq", + "llvm.AMDGPU.rsq.f64" => "__builtin_amdgpu_rsq", + "llvm.AMDGPU.rsq.v2f64" => "__builtin_amdgpu_rsq", + "llvm.AMDGPU.rsq.v4f32" => "__builtin_amdgpu_rsq", + "llvm.AMDGPU.trig.preop.f32" => "__builtin_amdgpu_trig_preop", + "llvm.AMDGPU.trig.preop.f64" => "__builtin_amdgpu_trig_preop", + "llvm.AMDGPU.trig.preop.v2f64" => "__builtin_amdgpu_trig_preop", + "llvm.AMDGPU.trig.preop.v4f32" => "__builtin_amdgpu_trig_preop", + // aarch64 + "llvm.aarch64.dmb" => "__builtin_arm_dmb", + "llvm.aarch64.dsb" => "__builtin_arm_dsb", + "llvm.aarch64.isb" => "__builtin_arm_isb", + "llvm.aarch64.prefetch" => "__builtin_arm_prefetch", + "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8", + "llvm.aarch64.sve.aese" => "__builtin_sve_svaese_u8", + "llvm.aarch64.sve.aesimc" => "__builtin_sve_svaesimc_u8", + "llvm.aarch64.sve.aesmc" => "__builtin_sve_svaesmc_u8", + "llvm.aarch64.sve.rax1" => "__builtin_sve_svrax1_u64", + "llvm.aarch64.sve.rdffr" => "__builtin_sve_svrdffr", + "llvm.aarch64.sve.rdffr.z" => "__builtin_sve_svrdffr_z", + "llvm.aarch64.sve.setffr" => "__builtin_sve_svsetffr", + "llvm.aarch64.sve.sm4e" => "__builtin_sve_svsm4e_u32", + "llvm.aarch64.sve.sm4ekey" => "__builtin_sve_svsm4ekey_u32", + "llvm.aarch64.sve.wrffr" => "__builtin_sve_svwrffr", + "llvm.aarch64.tcancel" => "__builtin_arm_tcancel", + "llvm.aarch64.tcommit" => "__builtin_arm_tcommit", + "llvm.aarch64.tstart" => "__builtin_arm_tstart", + "llvm.aarch64.ttest" => "__builtin_arm_ttest", + // amdgcn + "llvm.amdgcn.alignbyte" => "__builtin_amdgcn_alignbyte", + "llvm.amdgcn.buffer.wbinvl1" => "__builtin_amdgcn_buffer_wbinvl1", + "llvm.amdgcn.buffer.wbinvl1.sc" => "__builtin_amdgcn_buffer_wbinvl1_sc", + "llvm.amdgcn.buffer.wbinvl1.vol" => "__builtin_amdgcn_buffer_wbinvl1_vol", + "llvm.amdgcn.cubeid" => "__builtin_amdgcn_cubeid", + "llvm.amdgcn.cubema" => "__builtin_amdgcn_cubema", + "llvm.amdgcn.cubesc" => "__builtin_amdgcn_cubesc", + "llvm.amdgcn.cubetc" => "__builtin_amdgcn_cubetc", + "llvm.amdgcn.cvt.f32.bf8" => "__builtin_amdgcn_cvt_f32_bf8", + "llvm.amdgcn.cvt.f32.fp8" => "__builtin_amdgcn_cvt_f32_fp8", + "llvm.amdgcn.cvt.pk.bf8.f32" => "__builtin_amdgcn_cvt_pk_bf8_f32", + "llvm.amdgcn.cvt.pk.f32.bf8" => "__builtin_amdgcn_cvt_pk_f32_bf8", + "llvm.amdgcn.cvt.pk.f32.fp8" => 
"__builtin_amdgcn_cvt_pk_f32_fp8", + "llvm.amdgcn.cvt.pk.fp8.f32" => "__builtin_amdgcn_cvt_pk_fp8_f32", + "llvm.amdgcn.cvt.pk.i16" => "__builtin_amdgcn_cvt_pk_i16", + "llvm.amdgcn.cvt.pk.u16" => "__builtin_amdgcn_cvt_pk_u16", + "llvm.amdgcn.cvt.pk.u8.f32" => "__builtin_amdgcn_cvt_pk_u8_f32", + "llvm.amdgcn.cvt.pknorm.i16" => "__builtin_amdgcn_cvt_pknorm_i16", + "llvm.amdgcn.cvt.pknorm.u16" => "__builtin_amdgcn_cvt_pknorm_u16", + "llvm.amdgcn.cvt.pkrtz" => "__builtin_amdgcn_cvt_pkrtz", + "llvm.amdgcn.cvt.sr.bf8.f32" => "__builtin_amdgcn_cvt_sr_bf8_f32", + "llvm.amdgcn.cvt.sr.fp8.f32" => "__builtin_amdgcn_cvt_sr_fp8_f32", + "llvm.amdgcn.dispatch.id" => "__builtin_amdgcn_dispatch_id", + "llvm.amdgcn.ds.add.gs.reg.rtn" => "__builtin_amdgcn_ds_add_gs_reg_rtn", + "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute", + "llvm.amdgcn.ds.fadd.v2bf16" => "__builtin_amdgcn_ds_atomic_fadd_v2bf16", + "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier", + "llvm.amdgcn.ds.gws.init" => "__builtin_amdgcn_ds_gws_init", + "llvm.amdgcn.ds.gws.sema.br" => "__builtin_amdgcn_ds_gws_sema_br", + "llvm.amdgcn.ds.gws.sema.p" => "__builtin_amdgcn_ds_gws_sema_p", + "llvm.amdgcn.ds.gws.sema.release.all" => "__builtin_amdgcn_ds_gws_sema_release_all", + "llvm.amdgcn.ds.gws.sema.v" => "__builtin_amdgcn_ds_gws_sema_v", + "llvm.amdgcn.ds.permute" => "__builtin_amdgcn_ds_permute", + "llvm.amdgcn.ds.sub.gs.reg.rtn" => "__builtin_amdgcn_ds_sub_gs_reg_rtn", + "llvm.amdgcn.ds.swizzle" => "__builtin_amdgcn_ds_swizzle", + "llvm.amdgcn.endpgm" => "__builtin_amdgcn_endpgm", + "llvm.amdgcn.fdot2" => "__builtin_amdgcn_fdot2", + "llvm.amdgcn.fdot2.bf16.bf16" => "__builtin_amdgcn_fdot2_bf16_bf16", + "llvm.amdgcn.fdot2.f16.f16" => "__builtin_amdgcn_fdot2_f16_f16", + "llvm.amdgcn.fdot2.f32.bf16" => "__builtin_amdgcn_fdot2_f32_bf16", + "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy", + "llvm.amdgcn.groupstaticsize" => "__builtin_amdgcn_groupstaticsize", + "llvm.amdgcn.iglp.opt" => "__builtin_amdgcn_iglp_opt", + "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr", + "llvm.amdgcn.implicitarg.ptr" => "__builtin_amdgcn_implicitarg_ptr", + "llvm.amdgcn.interp.mov" => "__builtin_amdgcn_interp_mov", + "llvm.amdgcn.interp.p1" => "__builtin_amdgcn_interp_p1", + "llvm.amdgcn.interp.p1.f16" => "__builtin_amdgcn_interp_p1_f16", + "llvm.amdgcn.interp.p2" => "__builtin_amdgcn_interp_p2", + "llvm.amdgcn.interp.p2.f16" => "__builtin_amdgcn_interp_p2_f16", + "llvm.amdgcn.is.private" => "__builtin_amdgcn_is_private", + "llvm.amdgcn.is.shared" => "__builtin_amdgcn_is_shared", + "llvm.amdgcn.kernarg.segment.ptr" => "__builtin_amdgcn_kernarg_segment_ptr", + "llvm.amdgcn.lerp" => "__builtin_amdgcn_lerp", + "llvm.amdgcn.mbcnt.hi" => "__builtin_amdgcn_mbcnt_hi", + "llvm.amdgcn.mbcnt.lo" => "__builtin_amdgcn_mbcnt_lo", + "llvm.amdgcn.mfma.f32.16x16x16bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x16bf16_1k", + "llvm.amdgcn.mfma.f32.16x16x16f16" => "__builtin_amdgcn_mfma_f32_16x16x16f16", + "llvm.amdgcn.mfma.f32.16x16x1f32" => "__builtin_amdgcn_mfma_f32_16x16x1f32", + "llvm.amdgcn.mfma.f32.16x16x2bf16" => "__builtin_amdgcn_mfma_f32_16x16x2bf16", + "llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_bf8", + "llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_fp8", + "llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_bf8", + "llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_fp8", + 
"llvm.amdgcn.mfma.f32.16x16x4bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x4bf16_1k", + "llvm.amdgcn.mfma.f32.16x16x4f16" => "__builtin_amdgcn_mfma_f32_16x16x4f16", + "llvm.amdgcn.mfma.f32.16x16x4f32" => "__builtin_amdgcn_mfma_f32_16x16x4f32", + "llvm.amdgcn.mfma.f32.16x16x8.xf32" => "__builtin_amdgcn_mfma_f32_16x16x8_xf32", + "llvm.amdgcn.mfma.f32.16x16x8bf16" => "__builtin_amdgcn_mfma_f32_16x16x8bf16", + "llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_bf8", + "llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_fp8", + "llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_bf8", + "llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_fp8", + "llvm.amdgcn.mfma.f32.32x32x1f32" => "__builtin_amdgcn_mfma_f32_32x32x1f32", + "llvm.amdgcn.mfma.f32.32x32x2bf16" => "__builtin_amdgcn_mfma_f32_32x32x2bf16", + "llvm.amdgcn.mfma.f32.32x32x2f32" => "__builtin_amdgcn_mfma_f32_32x32x2f32", + "llvm.amdgcn.mfma.f32.32x32x4.xf32" => "__builtin_amdgcn_mfma_f32_32x32x4_xf32", + "llvm.amdgcn.mfma.f32.32x32x4bf16" => "__builtin_amdgcn_mfma_f32_32x32x4bf16", + "llvm.amdgcn.mfma.f32.32x32x4bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x4bf16_1k", + "llvm.amdgcn.mfma.f32.32x32x4f16" => "__builtin_amdgcn_mfma_f32_32x32x4f16", + "llvm.amdgcn.mfma.f32.32x32x8bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x8bf16_1k", + "llvm.amdgcn.mfma.f32.32x32x8f16" => "__builtin_amdgcn_mfma_f32_32x32x8f16", + "llvm.amdgcn.mfma.f32.4x4x1f32" => "__builtin_amdgcn_mfma_f32_4x4x1f32", + "llvm.amdgcn.mfma.f32.4x4x2bf16" => "__builtin_amdgcn_mfma_f32_4x4x2bf16", + "llvm.amdgcn.mfma.f32.4x4x4bf16.1k" => "__builtin_amdgcn_mfma_f32_4x4x4bf16_1k", + "llvm.amdgcn.mfma.f32.4x4x4f16" => "__builtin_amdgcn_mfma_f32_4x4x4f16", + "llvm.amdgcn.mfma.f64.16x16x4f64" => "__builtin_amdgcn_mfma_f64_16x16x4f64", + "llvm.amdgcn.mfma.f64.4x4x4f64" => "__builtin_amdgcn_mfma_f64_4x4x4f64", + "llvm.amdgcn.mfma.i32.16x16x16i8" => "__builtin_amdgcn_mfma_i32_16x16x16i8", + "llvm.amdgcn.mfma.i32.16x16x32.i8" => "__builtin_amdgcn_mfma_i32_16x16x32_i8", + "llvm.amdgcn.mfma.i32.16x16x4i8" => "__builtin_amdgcn_mfma_i32_16x16x4i8", + "llvm.amdgcn.mfma.i32.32x32x16.i8" => "__builtin_amdgcn_mfma_i32_32x32x16_i8", + "llvm.amdgcn.mfma.i32.32x32x4i8" => "__builtin_amdgcn_mfma_i32_32x32x4i8", + "llvm.amdgcn.mfma.i32.32x32x8i8" => "__builtin_amdgcn_mfma_i32_32x32x8i8", + "llvm.amdgcn.mfma.i32.4x4x4i8" => "__builtin_amdgcn_mfma_i32_4x4x4i8", + "llvm.amdgcn.mqsad.pk.u16.u8" => "__builtin_amdgcn_mqsad_pk_u16_u8", + "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8", + "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8", + "llvm.amdgcn.perm" => "__builtin_amdgcn_perm", + "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16", + "llvm.amdgcn.permlane64" => "__builtin_amdgcn_permlane64", + "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16", + "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8", + "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr", + "llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy", + "llvm.amdgcn.readfirstlane" => "__builtin_amdgcn_readfirstlane", + "llvm.amdgcn.readlane" => "__builtin_amdgcn_readlane", + "llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy", + "llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier", + "llvm.amdgcn.s.dcache.inv" => "__builtin_amdgcn_s_dcache_inv", + "llvm.amdgcn.s.dcache.inv.vol" => "__builtin_amdgcn_s_dcache_inv_vol", + "llvm.amdgcn.s.dcache.wb" => 
"__builtin_amdgcn_s_dcache_wb", + "llvm.amdgcn.s.dcache.wb.vol" => "__builtin_amdgcn_s_dcache_wb_vol", + "llvm.amdgcn.s.decperflevel" => "__builtin_amdgcn_s_decperflevel", + "llvm.amdgcn.s.get.waveid.in.workgroup" => "__builtin_amdgcn_s_get_waveid_in_workgroup", + "llvm.amdgcn.s.getpc" => "__builtin_amdgcn_s_getpc", + "llvm.amdgcn.s.getreg" => "__builtin_amdgcn_s_getreg", + "llvm.amdgcn.s.incperflevel" => "__builtin_amdgcn_s_incperflevel", + "llvm.amdgcn.s.memrealtime" => "__builtin_amdgcn_s_memrealtime", + "llvm.amdgcn.s.memtime" => "__builtin_amdgcn_s_memtime", + "llvm.amdgcn.s.sendmsg" => "__builtin_amdgcn_s_sendmsg", + "llvm.amdgcn.s.sendmsghalt" => "__builtin_amdgcn_s_sendmsghalt", + "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio", + "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg", + "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep", + "llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready", + "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt", + "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8", + "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16", + "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8", + "llvm.amdgcn.sched.barrier" => "__builtin_amdgcn_sched_barrier", + "llvm.amdgcn.sched.group.barrier" => "__builtin_amdgcn_sched_group_barrier", + "llvm.amdgcn.sdot2" => "__builtin_amdgcn_sdot2", + "llvm.amdgcn.sdot4" => "__builtin_amdgcn_sdot4", + "llvm.amdgcn.sdot8" => "__builtin_amdgcn_sdot8", + "llvm.amdgcn.smfmac.f32.16x16x32.bf16" => "__builtin_amdgcn_smfmac_f32_16x16x32_bf16", + "llvm.amdgcn.smfmac.f32.16x16x32.f16" => "__builtin_amdgcn_smfmac_f32_16x16x32_f16", + "llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_bf8", + "llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_fp8", + "llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_bf8", + "llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_fp8", + "llvm.amdgcn.smfmac.f32.32x32x16.bf16" => "__builtin_amdgcn_smfmac_f32_32x32x16_bf16", + "llvm.amdgcn.smfmac.f32.32x32x16.f16" => "__builtin_amdgcn_smfmac_f32_32x32x16_f16", + "llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_bf8", + "llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_fp8", + "llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_bf8", + "llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8", + "llvm.amdgcn.smfmac.i32.16x16x64.i8" => "__builtin_amdgcn_smfmac_i32_16x16x64_i8", + "llvm.amdgcn.smfmac.i32.32x32x32.i8" => "__builtin_amdgcn_smfmac_i32_32x32x32_i8", + "llvm.amdgcn.sudot4" => "__builtin_amdgcn_sudot4", + "llvm.amdgcn.sudot8" => "__builtin_amdgcn_sudot8", + "llvm.amdgcn.udot2" => "__builtin_amdgcn_udot2", + "llvm.amdgcn.udot4" => "__builtin_amdgcn_udot4", + "llvm.amdgcn.udot8" => "__builtin_amdgcn_udot8", + "llvm.amdgcn.wave.barrier" => "__builtin_amdgcn_wave_barrier", + "llvm.amdgcn.wavefrontsize" => "__builtin_amdgcn_wavefrontsize", + "llvm.amdgcn.workgroup.id.x" => "__builtin_amdgcn_workgroup_id_x", + "llvm.amdgcn.workgroup.id.y" => "__builtin_amdgcn_workgroup_id_y", + "llvm.amdgcn.workgroup.id.z" => "__builtin_amdgcn_workgroup_id_z", + "llvm.amdgcn.writelane" => "__builtin_amdgcn_writelane", + // arm + "llvm.arm.cdp" => "__builtin_arm_cdp", + "llvm.arm.cdp2" => "__builtin_arm_cdp2", + "llvm.arm.cmse.tt" => 
"__builtin_arm_cmse_TT", + "llvm.arm.cmse.tta" => "__builtin_arm_cmse_TTA", + "llvm.arm.cmse.ttat" => "__builtin_arm_cmse_TTAT", + "llvm.arm.cmse.ttt" => "__builtin_arm_cmse_TTT", + "llvm.arm.dmb" => "__builtin_arm_dmb", + "llvm.arm.dsb" => "__builtin_arm_dsb", + "llvm.arm.get.fpscr" => "__builtin_arm_get_fpscr", + "llvm.arm.isb" => "__builtin_arm_isb", + "llvm.arm.ldc" => "__builtin_arm_ldc", + "llvm.arm.ldc2" => "__builtin_arm_ldc2", + "llvm.arm.ldc2l" => "__builtin_arm_ldc2l", + "llvm.arm.ldcl" => "__builtin_arm_ldcl", + "llvm.arm.mcr" => "__builtin_arm_mcr", + "llvm.arm.mcr2" => "__builtin_arm_mcr2", + "llvm.arm.mcrr" => "__builtin_arm_mcrr", + "llvm.arm.mcrr2" => "__builtin_arm_mcrr2", + "llvm.arm.mrc" => "__builtin_arm_mrc", + "llvm.arm.mrc2" => "__builtin_arm_mrc2", + "llvm.arm.qadd" => "__builtin_arm_qadd", + "llvm.arm.qadd16" => "__builtin_arm_qadd16", + "llvm.arm.qadd8" => "__builtin_arm_qadd8", + "llvm.arm.qasx" => "__builtin_arm_qasx", + "llvm.arm.qsax" => "__builtin_arm_qsax", + "llvm.arm.qsub" => "__builtin_arm_qsub", + "llvm.arm.qsub16" => "__builtin_arm_qsub16", + "llvm.arm.qsub8" => "__builtin_arm_qsub8", + "llvm.arm.sadd16" => "__builtin_arm_sadd16", + "llvm.arm.sadd8" => "__builtin_arm_sadd8", + "llvm.arm.sasx" => "__builtin_arm_sasx", + "llvm.arm.sel" => "__builtin_arm_sel", + "llvm.arm.set.fpscr" => "__builtin_arm_set_fpscr", + "llvm.arm.shadd16" => "__builtin_arm_shadd16", + "llvm.arm.shadd8" => "__builtin_arm_shadd8", + "llvm.arm.shasx" => "__builtin_arm_shasx", + "llvm.arm.shsax" => "__builtin_arm_shsax", + "llvm.arm.shsub16" => "__builtin_arm_shsub16", + "llvm.arm.shsub8" => "__builtin_arm_shsub8", + "llvm.arm.smlabb" => "__builtin_arm_smlabb", + "llvm.arm.smlabt" => "__builtin_arm_smlabt", + "llvm.arm.smlad" => "__builtin_arm_smlad", + "llvm.arm.smladx" => "__builtin_arm_smladx", + "llvm.arm.smlald" => "__builtin_arm_smlald", + "llvm.arm.smlaldx" => "__builtin_arm_smlaldx", + "llvm.arm.smlatb" => "__builtin_arm_smlatb", + "llvm.arm.smlatt" => "__builtin_arm_smlatt", + "llvm.arm.smlawb" => "__builtin_arm_smlawb", + "llvm.arm.smlawt" => "__builtin_arm_smlawt", + "llvm.arm.smlsd" => "__builtin_arm_smlsd", + "llvm.arm.smlsdx" => "__builtin_arm_smlsdx", + "llvm.arm.smlsld" => "__builtin_arm_smlsld", + "llvm.arm.smlsldx" => "__builtin_arm_smlsldx", + "llvm.arm.smuad" => "__builtin_arm_smuad", + "llvm.arm.smuadx" => "__builtin_arm_smuadx", + "llvm.arm.smulbb" => "__builtin_arm_smulbb", + "llvm.arm.smulbt" => "__builtin_arm_smulbt", + "llvm.arm.smultb" => "__builtin_arm_smultb", + "llvm.arm.smultt" => "__builtin_arm_smultt", + "llvm.arm.smulwb" => "__builtin_arm_smulwb", + "llvm.arm.smulwt" => "__builtin_arm_smulwt", + "llvm.arm.smusd" => "__builtin_arm_smusd", + "llvm.arm.smusdx" => "__builtin_arm_smusdx", + "llvm.arm.ssat" => "__builtin_arm_ssat", + "llvm.arm.ssat16" => "__builtin_arm_ssat16", + "llvm.arm.ssax" => "__builtin_arm_ssax", + "llvm.arm.ssub16" => "__builtin_arm_ssub16", + "llvm.arm.ssub8" => "__builtin_arm_ssub8", + "llvm.arm.stc" => "__builtin_arm_stc", + "llvm.arm.stc2" => "__builtin_arm_stc2", + "llvm.arm.stc2l" => "__builtin_arm_stc2l", + "llvm.arm.stcl" => "__builtin_arm_stcl", + "llvm.arm.sxtab16" => "__builtin_arm_sxtab16", + "llvm.arm.sxtb16" => "__builtin_arm_sxtb16", + "llvm.arm.thread.pointer" => "__builtin_thread_pointer", + "llvm.arm.uadd16" => "__builtin_arm_uadd16", + "llvm.arm.uadd8" => "__builtin_arm_uadd8", + "llvm.arm.uasx" => "__builtin_arm_uasx", + "llvm.arm.uhadd16" => "__builtin_arm_uhadd16", + "llvm.arm.uhadd8" => 
"__builtin_arm_uhadd8", + "llvm.arm.uhasx" => "__builtin_arm_uhasx", + "llvm.arm.uhsax" => "__builtin_arm_uhsax", + "llvm.arm.uhsub16" => "__builtin_arm_uhsub16", + "llvm.arm.uhsub8" => "__builtin_arm_uhsub8", + "llvm.arm.uqadd16" => "__builtin_arm_uqadd16", + "llvm.arm.uqadd8" => "__builtin_arm_uqadd8", + "llvm.arm.uqasx" => "__builtin_arm_uqasx", + "llvm.arm.uqsax" => "__builtin_arm_uqsax", + "llvm.arm.uqsub16" => "__builtin_arm_uqsub16", + "llvm.arm.uqsub8" => "__builtin_arm_uqsub8", + "llvm.arm.usad8" => "__builtin_arm_usad8", + "llvm.arm.usada8" => "__builtin_arm_usada8", + "llvm.arm.usat" => "__builtin_arm_usat", + "llvm.arm.usat16" => "__builtin_arm_usat16", + "llvm.arm.usax" => "__builtin_arm_usax", + "llvm.arm.usub16" => "__builtin_arm_usub16", + "llvm.arm.usub8" => "__builtin_arm_usub8", + "llvm.arm.uxtab16" => "__builtin_arm_uxtab16", + "llvm.arm.uxtb16" => "__builtin_arm_uxtb16", + // bpf + "llvm.bpf.btf.type.id" => "__builtin_bpf_btf_type_id", + "llvm.bpf.compare" => "__builtin_bpf_compare", + "llvm.bpf.load.byte" => "__builtin_bpf_load_byte", + "llvm.bpf.load.half" => "__builtin_bpf_load_half", + "llvm.bpf.load.word" => "__builtin_bpf_load_word", + "llvm.bpf.passthrough" => "__builtin_bpf_passthrough", + "llvm.bpf.preserve.enum.value" => "__builtin_bpf_preserve_enum_value", + "llvm.bpf.preserve.field.info" => "__builtin_bpf_preserve_field_info", + "llvm.bpf.preserve.type.info" => "__builtin_bpf_preserve_type_info", + "llvm.bpf.pseudo" => "__builtin_bpf_pseudo", + // cuda + "llvm.cuda.syncthreads" => "__syncthreads", + // dx + "llvm.dx.create.handle" => "__builtin_hlsl_create_handle", + // hexagon + "llvm.hexagon.A2.abs" => "__builtin_HEXAGON_A2_abs", + "llvm.hexagon.A2.absp" => "__builtin_HEXAGON_A2_absp", + "llvm.hexagon.A2.abssat" => "__builtin_HEXAGON_A2_abssat", + "llvm.hexagon.A2.add" => "__builtin_HEXAGON_A2_add", + "llvm.hexagon.A2.addh.h16.hh" => "__builtin_HEXAGON_A2_addh_h16_hh", + "llvm.hexagon.A2.addh.h16.hl" => "__builtin_HEXAGON_A2_addh_h16_hl", + "llvm.hexagon.A2.addh.h16.lh" => "__builtin_HEXAGON_A2_addh_h16_lh", + "llvm.hexagon.A2.addh.h16.ll" => "__builtin_HEXAGON_A2_addh_h16_ll", + "llvm.hexagon.A2.addh.h16.sat.hh" => "__builtin_HEXAGON_A2_addh_h16_sat_hh", + "llvm.hexagon.A2.addh.h16.sat.hl" => "__builtin_HEXAGON_A2_addh_h16_sat_hl", + "llvm.hexagon.A2.addh.h16.sat.lh" => "__builtin_HEXAGON_A2_addh_h16_sat_lh", + "llvm.hexagon.A2.addh.h16.sat.ll" => "__builtin_HEXAGON_A2_addh_h16_sat_ll", + "llvm.hexagon.A2.addh.l16.hl" => "__builtin_HEXAGON_A2_addh_l16_hl", + "llvm.hexagon.A2.addh.l16.ll" => "__builtin_HEXAGON_A2_addh_l16_ll", + "llvm.hexagon.A2.addh.l16.sat.hl" => "__builtin_HEXAGON_A2_addh_l16_sat_hl", + "llvm.hexagon.A2.addh.l16.sat.ll" => "__builtin_HEXAGON_A2_addh_l16_sat_ll", + "llvm.hexagon.A2.addi" => "__builtin_HEXAGON_A2_addi", + "llvm.hexagon.A2.addp" => "__builtin_HEXAGON_A2_addp", + "llvm.hexagon.A2.addpsat" => "__builtin_HEXAGON_A2_addpsat", + "llvm.hexagon.A2.addsat" => "__builtin_HEXAGON_A2_addsat", + "llvm.hexagon.A2.addsp" => "__builtin_HEXAGON_A2_addsp", + "llvm.hexagon.A2.and" => "__builtin_HEXAGON_A2_and", + "llvm.hexagon.A2.andir" => "__builtin_HEXAGON_A2_andir", + "llvm.hexagon.A2.andp" => "__builtin_HEXAGON_A2_andp", + "llvm.hexagon.A2.aslh" => "__builtin_HEXAGON_A2_aslh", + "llvm.hexagon.A2.asrh" => "__builtin_HEXAGON_A2_asrh", + "llvm.hexagon.A2.combine.hh" => "__builtin_HEXAGON_A2_combine_hh", + "llvm.hexagon.A2.combine.hl" => "__builtin_HEXAGON_A2_combine_hl", + "llvm.hexagon.A2.combine.lh" => "__builtin_HEXAGON_A2_combine_lh", 
+ "llvm.hexagon.A2.combine.ll" => "__builtin_HEXAGON_A2_combine_ll", + "llvm.hexagon.A2.combineii" => "__builtin_HEXAGON_A2_combineii", + "llvm.hexagon.A2.combinew" => "__builtin_HEXAGON_A2_combinew", + "llvm.hexagon.A2.max" => "__builtin_HEXAGON_A2_max", + "llvm.hexagon.A2.maxp" => "__builtin_HEXAGON_A2_maxp", + "llvm.hexagon.A2.maxu" => "__builtin_HEXAGON_A2_maxu", + "llvm.hexagon.A2.maxup" => "__builtin_HEXAGON_A2_maxup", + "llvm.hexagon.A2.min" => "__builtin_HEXAGON_A2_min", + "llvm.hexagon.A2.minp" => "__builtin_HEXAGON_A2_minp", + "llvm.hexagon.A2.minu" => "__builtin_HEXAGON_A2_minu", + "llvm.hexagon.A2.minup" => "__builtin_HEXAGON_A2_minup", + "llvm.hexagon.A2.neg" => "__builtin_HEXAGON_A2_neg", + "llvm.hexagon.A2.negp" => "__builtin_HEXAGON_A2_negp", + "llvm.hexagon.A2.negsat" => "__builtin_HEXAGON_A2_negsat", + "llvm.hexagon.A2.not" => "__builtin_HEXAGON_A2_not", + "llvm.hexagon.A2.notp" => "__builtin_HEXAGON_A2_notp", + "llvm.hexagon.A2.or" => "__builtin_HEXAGON_A2_or", + "llvm.hexagon.A2.orir" => "__builtin_HEXAGON_A2_orir", + "llvm.hexagon.A2.orp" => "__builtin_HEXAGON_A2_orp", + "llvm.hexagon.A2.roundsat" => "__builtin_HEXAGON_A2_roundsat", + "llvm.hexagon.A2.sat" => "__builtin_HEXAGON_A2_sat", + "llvm.hexagon.A2.satb" => "__builtin_HEXAGON_A2_satb", + "llvm.hexagon.A2.sath" => "__builtin_HEXAGON_A2_sath", + "llvm.hexagon.A2.satub" => "__builtin_HEXAGON_A2_satub", + "llvm.hexagon.A2.satuh" => "__builtin_HEXAGON_A2_satuh", + "llvm.hexagon.A2.sub" => "__builtin_HEXAGON_A2_sub", + "llvm.hexagon.A2.subh.h16.hh" => "__builtin_HEXAGON_A2_subh_h16_hh", + "llvm.hexagon.A2.subh.h16.hl" => "__builtin_HEXAGON_A2_subh_h16_hl", + "llvm.hexagon.A2.subh.h16.lh" => "__builtin_HEXAGON_A2_subh_h16_lh", + "llvm.hexagon.A2.subh.h16.ll" => "__builtin_HEXAGON_A2_subh_h16_ll", + "llvm.hexagon.A2.subh.h16.sat.hh" => "__builtin_HEXAGON_A2_subh_h16_sat_hh", + "llvm.hexagon.A2.subh.h16.sat.hl" => "__builtin_HEXAGON_A2_subh_h16_sat_hl", + "llvm.hexagon.A2.subh.h16.sat.lh" => "__builtin_HEXAGON_A2_subh_h16_sat_lh", + "llvm.hexagon.A2.subh.h16.sat.ll" => "__builtin_HEXAGON_A2_subh_h16_sat_ll", + "llvm.hexagon.A2.subh.l16.hl" => "__builtin_HEXAGON_A2_subh_l16_hl", + "llvm.hexagon.A2.subh.l16.ll" => "__builtin_HEXAGON_A2_subh_l16_ll", + "llvm.hexagon.A2.subh.l16.sat.hl" => "__builtin_HEXAGON_A2_subh_l16_sat_hl", + "llvm.hexagon.A2.subh.l16.sat.ll" => "__builtin_HEXAGON_A2_subh_l16_sat_ll", + "llvm.hexagon.A2.subp" => "__builtin_HEXAGON_A2_subp", + "llvm.hexagon.A2.subri" => "__builtin_HEXAGON_A2_subri", + "llvm.hexagon.A2.subsat" => "__builtin_HEXAGON_A2_subsat", + "llvm.hexagon.A2.svaddh" => "__builtin_HEXAGON_A2_svaddh", + "llvm.hexagon.A2.svaddhs" => "__builtin_HEXAGON_A2_svaddhs", + "llvm.hexagon.A2.svadduhs" => "__builtin_HEXAGON_A2_svadduhs", + "llvm.hexagon.A2.svavgh" => "__builtin_HEXAGON_A2_svavgh", + "llvm.hexagon.A2.svavghs" => "__builtin_HEXAGON_A2_svavghs", + "llvm.hexagon.A2.svnavgh" => "__builtin_HEXAGON_A2_svnavgh", + "llvm.hexagon.A2.svsubh" => "__builtin_HEXAGON_A2_svsubh", + "llvm.hexagon.A2.svsubhs" => "__builtin_HEXAGON_A2_svsubhs", + "llvm.hexagon.A2.svsubuhs" => "__builtin_HEXAGON_A2_svsubuhs", + "llvm.hexagon.A2.swiz" => "__builtin_HEXAGON_A2_swiz", + "llvm.hexagon.A2.sxtb" => "__builtin_HEXAGON_A2_sxtb", + "llvm.hexagon.A2.sxth" => "__builtin_HEXAGON_A2_sxth", + "llvm.hexagon.A2.sxtw" => "__builtin_HEXAGON_A2_sxtw", + "llvm.hexagon.A2.tfr" => "__builtin_HEXAGON_A2_tfr", + "llvm.hexagon.A2.tfrih" => "__builtin_HEXAGON_A2_tfrih", + "llvm.hexagon.A2.tfril" => 
"__builtin_HEXAGON_A2_tfril", + "llvm.hexagon.A2.tfrp" => "__builtin_HEXAGON_A2_tfrp", + "llvm.hexagon.A2.tfrpi" => "__builtin_HEXAGON_A2_tfrpi", + "llvm.hexagon.A2.tfrsi" => "__builtin_HEXAGON_A2_tfrsi", + "llvm.hexagon.A2.vabsh" => "__builtin_HEXAGON_A2_vabsh", + "llvm.hexagon.A2.vabshsat" => "__builtin_HEXAGON_A2_vabshsat", + "llvm.hexagon.A2.vabsw" => "__builtin_HEXAGON_A2_vabsw", + "llvm.hexagon.A2.vabswsat" => "__builtin_HEXAGON_A2_vabswsat", + "llvm.hexagon.A2.vaddb.map" => "__builtin_HEXAGON_A2_vaddb_map", + "llvm.hexagon.A2.vaddh" => "__builtin_HEXAGON_A2_vaddh", + "llvm.hexagon.A2.vaddhs" => "__builtin_HEXAGON_A2_vaddhs", + "llvm.hexagon.A2.vaddub" => "__builtin_HEXAGON_A2_vaddub", + "llvm.hexagon.A2.vaddubs" => "__builtin_HEXAGON_A2_vaddubs", + "llvm.hexagon.A2.vadduhs" => "__builtin_HEXAGON_A2_vadduhs", + "llvm.hexagon.A2.vaddw" => "__builtin_HEXAGON_A2_vaddw", + "llvm.hexagon.A2.vaddws" => "__builtin_HEXAGON_A2_vaddws", + "llvm.hexagon.A2.vavgh" => "__builtin_HEXAGON_A2_vavgh", + "llvm.hexagon.A2.vavghcr" => "__builtin_HEXAGON_A2_vavghcr", + "llvm.hexagon.A2.vavghr" => "__builtin_HEXAGON_A2_vavghr", + "llvm.hexagon.A2.vavgub" => "__builtin_HEXAGON_A2_vavgub", + "llvm.hexagon.A2.vavgubr" => "__builtin_HEXAGON_A2_vavgubr", + "llvm.hexagon.A2.vavguh" => "__builtin_HEXAGON_A2_vavguh", + "llvm.hexagon.A2.vavguhr" => "__builtin_HEXAGON_A2_vavguhr", + "llvm.hexagon.A2.vavguw" => "__builtin_HEXAGON_A2_vavguw", + "llvm.hexagon.A2.vavguwr" => "__builtin_HEXAGON_A2_vavguwr", + "llvm.hexagon.A2.vavgw" => "__builtin_HEXAGON_A2_vavgw", + "llvm.hexagon.A2.vavgwcr" => "__builtin_HEXAGON_A2_vavgwcr", + "llvm.hexagon.A2.vavgwr" => "__builtin_HEXAGON_A2_vavgwr", + "llvm.hexagon.A2.vcmpbeq" => "__builtin_HEXAGON_A2_vcmpbeq", + "llvm.hexagon.A2.vcmpbgtu" => "__builtin_HEXAGON_A2_vcmpbgtu", + "llvm.hexagon.A2.vcmpheq" => "__builtin_HEXAGON_A2_vcmpheq", + "llvm.hexagon.A2.vcmphgt" => "__builtin_HEXAGON_A2_vcmphgt", + "llvm.hexagon.A2.vcmphgtu" => "__builtin_HEXAGON_A2_vcmphgtu", + "llvm.hexagon.A2.vcmpweq" => "__builtin_HEXAGON_A2_vcmpweq", + "llvm.hexagon.A2.vcmpwgt" => "__builtin_HEXAGON_A2_vcmpwgt", + "llvm.hexagon.A2.vcmpwgtu" => "__builtin_HEXAGON_A2_vcmpwgtu", + "llvm.hexagon.A2.vconj" => "__builtin_HEXAGON_A2_vconj", + "llvm.hexagon.A2.vmaxb" => "__builtin_HEXAGON_A2_vmaxb", + "llvm.hexagon.A2.vmaxh" => "__builtin_HEXAGON_A2_vmaxh", + "llvm.hexagon.A2.vmaxub" => "__builtin_HEXAGON_A2_vmaxub", + "llvm.hexagon.A2.vmaxuh" => "__builtin_HEXAGON_A2_vmaxuh", + "llvm.hexagon.A2.vmaxuw" => "__builtin_HEXAGON_A2_vmaxuw", + "llvm.hexagon.A2.vmaxw" => "__builtin_HEXAGON_A2_vmaxw", + "llvm.hexagon.A2.vminb" => "__builtin_HEXAGON_A2_vminb", + "llvm.hexagon.A2.vminh" => "__builtin_HEXAGON_A2_vminh", + "llvm.hexagon.A2.vminub" => "__builtin_HEXAGON_A2_vminub", + "llvm.hexagon.A2.vminuh" => "__builtin_HEXAGON_A2_vminuh", + "llvm.hexagon.A2.vminuw" => "__builtin_HEXAGON_A2_vminuw", + "llvm.hexagon.A2.vminw" => "__builtin_HEXAGON_A2_vminw", + "llvm.hexagon.A2.vnavgh" => "__builtin_HEXAGON_A2_vnavgh", + "llvm.hexagon.A2.vnavghcr" => "__builtin_HEXAGON_A2_vnavghcr", + "llvm.hexagon.A2.vnavghr" => "__builtin_HEXAGON_A2_vnavghr", + "llvm.hexagon.A2.vnavgw" => "__builtin_HEXAGON_A2_vnavgw", + "llvm.hexagon.A2.vnavgwcr" => "__builtin_HEXAGON_A2_vnavgwcr", + "llvm.hexagon.A2.vnavgwr" => "__builtin_HEXAGON_A2_vnavgwr", + "llvm.hexagon.A2.vraddub" => "__builtin_HEXAGON_A2_vraddub", + "llvm.hexagon.A2.vraddub.acc" => "__builtin_HEXAGON_A2_vraddub_acc", + "llvm.hexagon.A2.vrsadub" => "__builtin_HEXAGON_A2_vrsadub", + 
"llvm.hexagon.A2.vrsadub.acc" => "__builtin_HEXAGON_A2_vrsadub_acc", + "llvm.hexagon.A2.vsubb.map" => "__builtin_HEXAGON_A2_vsubb_map", + "llvm.hexagon.A2.vsubh" => "__builtin_HEXAGON_A2_vsubh", + "llvm.hexagon.A2.vsubhs" => "__builtin_HEXAGON_A2_vsubhs", + "llvm.hexagon.A2.vsubub" => "__builtin_HEXAGON_A2_vsubub", + "llvm.hexagon.A2.vsububs" => "__builtin_HEXAGON_A2_vsububs", + "llvm.hexagon.A2.vsubuhs" => "__builtin_HEXAGON_A2_vsubuhs", + "llvm.hexagon.A2.vsubw" => "__builtin_HEXAGON_A2_vsubw", + "llvm.hexagon.A2.vsubws" => "__builtin_HEXAGON_A2_vsubws", + "llvm.hexagon.A2.xor" => "__builtin_HEXAGON_A2_xor", + "llvm.hexagon.A2.xorp" => "__builtin_HEXAGON_A2_xorp", + "llvm.hexagon.A2.zxtb" => "__builtin_HEXAGON_A2_zxtb", + "llvm.hexagon.A2.zxth" => "__builtin_HEXAGON_A2_zxth", + "llvm.hexagon.A4.andn" => "__builtin_HEXAGON_A4_andn", + "llvm.hexagon.A4.andnp" => "__builtin_HEXAGON_A4_andnp", + "llvm.hexagon.A4.bitsplit" => "__builtin_HEXAGON_A4_bitsplit", + "llvm.hexagon.A4.bitspliti" => "__builtin_HEXAGON_A4_bitspliti", + "llvm.hexagon.A4.boundscheck" => "__builtin_HEXAGON_A4_boundscheck", + "llvm.hexagon.A4.cmpbeq" => "__builtin_HEXAGON_A4_cmpbeq", + "llvm.hexagon.A4.cmpbeqi" => "__builtin_HEXAGON_A4_cmpbeqi", + "llvm.hexagon.A4.cmpbgt" => "__builtin_HEXAGON_A4_cmpbgt", + "llvm.hexagon.A4.cmpbgti" => "__builtin_HEXAGON_A4_cmpbgti", + "llvm.hexagon.A4.cmpbgtu" => "__builtin_HEXAGON_A4_cmpbgtu", + "llvm.hexagon.A4.cmpbgtui" => "__builtin_HEXAGON_A4_cmpbgtui", + "llvm.hexagon.A4.cmpheq" => "__builtin_HEXAGON_A4_cmpheq", + "llvm.hexagon.A4.cmpheqi" => "__builtin_HEXAGON_A4_cmpheqi", + "llvm.hexagon.A4.cmphgt" => "__builtin_HEXAGON_A4_cmphgt", + "llvm.hexagon.A4.cmphgti" => "__builtin_HEXAGON_A4_cmphgti", + "llvm.hexagon.A4.cmphgtu" => "__builtin_HEXAGON_A4_cmphgtu", + "llvm.hexagon.A4.cmphgtui" => "__builtin_HEXAGON_A4_cmphgtui", + "llvm.hexagon.A4.combineir" => "__builtin_HEXAGON_A4_combineir", + "llvm.hexagon.A4.combineri" => "__builtin_HEXAGON_A4_combineri", + "llvm.hexagon.A4.cround.ri" => "__builtin_HEXAGON_A4_cround_ri", + "llvm.hexagon.A4.cround.rr" => "__builtin_HEXAGON_A4_cround_rr", + "llvm.hexagon.A4.modwrapu" => "__builtin_HEXAGON_A4_modwrapu", + "llvm.hexagon.A4.orn" => "__builtin_HEXAGON_A4_orn", + "llvm.hexagon.A4.ornp" => "__builtin_HEXAGON_A4_ornp", + "llvm.hexagon.A4.rcmpeq" => "__builtin_HEXAGON_A4_rcmpeq", + "llvm.hexagon.A4.rcmpeqi" => "__builtin_HEXAGON_A4_rcmpeqi", + "llvm.hexagon.A4.rcmpneq" => "__builtin_HEXAGON_A4_rcmpneq", + "llvm.hexagon.A4.rcmpneqi" => "__builtin_HEXAGON_A4_rcmpneqi", + "llvm.hexagon.A4.round.ri" => "__builtin_HEXAGON_A4_round_ri", + "llvm.hexagon.A4.round.ri.sat" => "__builtin_HEXAGON_A4_round_ri_sat", + "llvm.hexagon.A4.round.rr" => "__builtin_HEXAGON_A4_round_rr", + "llvm.hexagon.A4.round.rr.sat" => "__builtin_HEXAGON_A4_round_rr_sat", + "llvm.hexagon.A4.tlbmatch" => "__builtin_HEXAGON_A4_tlbmatch", + "llvm.hexagon.A4.vcmpbeq.any" => "__builtin_HEXAGON_A4_vcmpbeq_any", + "llvm.hexagon.A4.vcmpbeqi" => "__builtin_HEXAGON_A4_vcmpbeqi", + "llvm.hexagon.A4.vcmpbgt" => "__builtin_HEXAGON_A4_vcmpbgt", + "llvm.hexagon.A4.vcmpbgti" => "__builtin_HEXAGON_A4_vcmpbgti", + "llvm.hexagon.A4.vcmpbgtui" => "__builtin_HEXAGON_A4_vcmpbgtui", + "llvm.hexagon.A4.vcmpheqi" => "__builtin_HEXAGON_A4_vcmpheqi", + "llvm.hexagon.A4.vcmphgti" => "__builtin_HEXAGON_A4_vcmphgti", + "llvm.hexagon.A4.vcmphgtui" => "__builtin_HEXAGON_A4_vcmphgtui", + "llvm.hexagon.A4.vcmpweqi" => "__builtin_HEXAGON_A4_vcmpweqi", + "llvm.hexagon.A4.vcmpwgti" => 
"__builtin_HEXAGON_A4_vcmpwgti", + "llvm.hexagon.A4.vcmpwgtui" => "__builtin_HEXAGON_A4_vcmpwgtui", + "llvm.hexagon.A4.vrmaxh" => "__builtin_HEXAGON_A4_vrmaxh", + "llvm.hexagon.A4.vrmaxuh" => "__builtin_HEXAGON_A4_vrmaxuh", + "llvm.hexagon.A4.vrmaxuw" => "__builtin_HEXAGON_A4_vrmaxuw", + "llvm.hexagon.A4.vrmaxw" => "__builtin_HEXAGON_A4_vrmaxw", + "llvm.hexagon.A4.vrminh" => "__builtin_HEXAGON_A4_vrminh", + "llvm.hexagon.A4.vrminuh" => "__builtin_HEXAGON_A4_vrminuh", + "llvm.hexagon.A4.vrminuw" => "__builtin_HEXAGON_A4_vrminuw", + "llvm.hexagon.A4.vrminw" => "__builtin_HEXAGON_A4_vrminw", + "llvm.hexagon.A5.vaddhubs" => "__builtin_HEXAGON_A5_vaddhubs", + "llvm.hexagon.A6.vcmpbeq.notany" => "__builtin_HEXAGON_A6_vcmpbeq_notany", + "llvm.hexagon.A7.clip" => "__builtin_HEXAGON_A7_clip", + "llvm.hexagon.A7.croundd.ri" => "__builtin_HEXAGON_A7_croundd_ri", + "llvm.hexagon.A7.croundd.rr" => "__builtin_HEXAGON_A7_croundd_rr", + "llvm.hexagon.A7.vclip" => "__builtin_HEXAGON_A7_vclip", + "llvm.hexagon.C2.all8" => "__builtin_HEXAGON_C2_all8", + "llvm.hexagon.C2.and" => "__builtin_HEXAGON_C2_and", + "llvm.hexagon.C2.andn" => "__builtin_HEXAGON_C2_andn", + "llvm.hexagon.C2.any8" => "__builtin_HEXAGON_C2_any8", + "llvm.hexagon.C2.bitsclr" => "__builtin_HEXAGON_C2_bitsclr", + "llvm.hexagon.C2.bitsclri" => "__builtin_HEXAGON_C2_bitsclri", + "llvm.hexagon.C2.bitsset" => "__builtin_HEXAGON_C2_bitsset", + "llvm.hexagon.C2.cmpeq" => "__builtin_HEXAGON_C2_cmpeq", + "llvm.hexagon.C2.cmpeqi" => "__builtin_HEXAGON_C2_cmpeqi", + "llvm.hexagon.C2.cmpeqp" => "__builtin_HEXAGON_C2_cmpeqp", + "llvm.hexagon.C2.cmpgei" => "__builtin_HEXAGON_C2_cmpgei", + "llvm.hexagon.C2.cmpgeui" => "__builtin_HEXAGON_C2_cmpgeui", + "llvm.hexagon.C2.cmpgt" => "__builtin_HEXAGON_C2_cmpgt", + "llvm.hexagon.C2.cmpgti" => "__builtin_HEXAGON_C2_cmpgti", + "llvm.hexagon.C2.cmpgtp" => "__builtin_HEXAGON_C2_cmpgtp", + "llvm.hexagon.C2.cmpgtu" => "__builtin_HEXAGON_C2_cmpgtu", + "llvm.hexagon.C2.cmpgtui" => "__builtin_HEXAGON_C2_cmpgtui", + "llvm.hexagon.C2.cmpgtup" => "__builtin_HEXAGON_C2_cmpgtup", + "llvm.hexagon.C2.cmplt" => "__builtin_HEXAGON_C2_cmplt", + "llvm.hexagon.C2.cmpltu" => "__builtin_HEXAGON_C2_cmpltu", + "llvm.hexagon.C2.mask" => "__builtin_HEXAGON_C2_mask", + "llvm.hexagon.C2.mux" => "__builtin_HEXAGON_C2_mux", + "llvm.hexagon.C2.muxii" => "__builtin_HEXAGON_C2_muxii", + "llvm.hexagon.C2.muxir" => "__builtin_HEXAGON_C2_muxir", + "llvm.hexagon.C2.muxri" => "__builtin_HEXAGON_C2_muxri", + "llvm.hexagon.C2.not" => "__builtin_HEXAGON_C2_not", + "llvm.hexagon.C2.or" => "__builtin_HEXAGON_C2_or", + "llvm.hexagon.C2.orn" => "__builtin_HEXAGON_C2_orn", + "llvm.hexagon.C2.pxfer.map" => "__builtin_HEXAGON_C2_pxfer_map", + "llvm.hexagon.C2.tfrpr" => "__builtin_HEXAGON_C2_tfrpr", + "llvm.hexagon.C2.tfrrp" => "__builtin_HEXAGON_C2_tfrrp", + "llvm.hexagon.C2.vitpack" => "__builtin_HEXAGON_C2_vitpack", + "llvm.hexagon.C2.vmux" => "__builtin_HEXAGON_C2_vmux", + "llvm.hexagon.C2.xor" => "__builtin_HEXAGON_C2_xor", + "llvm.hexagon.C4.and.and" => "__builtin_HEXAGON_C4_and_and", + "llvm.hexagon.C4.and.andn" => "__builtin_HEXAGON_C4_and_andn", + "llvm.hexagon.C4.and.or" => "__builtin_HEXAGON_C4_and_or", + "llvm.hexagon.C4.and.orn" => "__builtin_HEXAGON_C4_and_orn", + "llvm.hexagon.C4.cmplte" => "__builtin_HEXAGON_C4_cmplte", + "llvm.hexagon.C4.cmpltei" => "__builtin_HEXAGON_C4_cmpltei", + "llvm.hexagon.C4.cmplteu" => "__builtin_HEXAGON_C4_cmplteu", + "llvm.hexagon.C4.cmplteui" => "__builtin_HEXAGON_C4_cmplteui", + "llvm.hexagon.C4.cmpneq" => 
"__builtin_HEXAGON_C4_cmpneq", + "llvm.hexagon.C4.cmpneqi" => "__builtin_HEXAGON_C4_cmpneqi", + "llvm.hexagon.C4.fastcorner9" => "__builtin_HEXAGON_C4_fastcorner9", + "llvm.hexagon.C4.fastcorner9.not" => "__builtin_HEXAGON_C4_fastcorner9_not", + "llvm.hexagon.C4.nbitsclr" => "__builtin_HEXAGON_C4_nbitsclr", + "llvm.hexagon.C4.nbitsclri" => "__builtin_HEXAGON_C4_nbitsclri", + "llvm.hexagon.C4.nbitsset" => "__builtin_HEXAGON_C4_nbitsset", + "llvm.hexagon.C4.or.and" => "__builtin_HEXAGON_C4_or_and", + "llvm.hexagon.C4.or.andn" => "__builtin_HEXAGON_C4_or_andn", + "llvm.hexagon.C4.or.or" => "__builtin_HEXAGON_C4_or_or", + "llvm.hexagon.C4.or.orn" => "__builtin_HEXAGON_C4_or_orn", + "llvm.hexagon.F2.conv.d2df" => "__builtin_HEXAGON_F2_conv_d2df", + "llvm.hexagon.F2.conv.d2sf" => "__builtin_HEXAGON_F2_conv_d2sf", + "llvm.hexagon.F2.conv.df2d" => "__builtin_HEXAGON_F2_conv_df2d", + "llvm.hexagon.F2.conv.df2d.chop" => "__builtin_HEXAGON_F2_conv_df2d_chop", + "llvm.hexagon.F2.conv.df2sf" => "__builtin_HEXAGON_F2_conv_df2sf", + "llvm.hexagon.F2.conv.df2ud" => "__builtin_HEXAGON_F2_conv_df2ud", + "llvm.hexagon.F2.conv.df2ud.chop" => "__builtin_HEXAGON_F2_conv_df2ud_chop", + "llvm.hexagon.F2.conv.df2uw" => "__builtin_HEXAGON_F2_conv_df2uw", + "llvm.hexagon.F2.conv.df2uw.chop" => "__builtin_HEXAGON_F2_conv_df2uw_chop", + "llvm.hexagon.F2.conv.df2w" => "__builtin_HEXAGON_F2_conv_df2w", + "llvm.hexagon.F2.conv.df2w.chop" => "__builtin_HEXAGON_F2_conv_df2w_chop", + "llvm.hexagon.F2.conv.sf2d" => "__builtin_HEXAGON_F2_conv_sf2d", + "llvm.hexagon.F2.conv.sf2d.chop" => "__builtin_HEXAGON_F2_conv_sf2d_chop", + "llvm.hexagon.F2.conv.sf2df" => "__builtin_HEXAGON_F2_conv_sf2df", + "llvm.hexagon.F2.conv.sf2ud" => "__builtin_HEXAGON_F2_conv_sf2ud", + "llvm.hexagon.F2.conv.sf2ud.chop" => "__builtin_HEXAGON_F2_conv_sf2ud_chop", + "llvm.hexagon.F2.conv.sf2uw" => "__builtin_HEXAGON_F2_conv_sf2uw", + "llvm.hexagon.F2.conv.sf2uw.chop" => "__builtin_HEXAGON_F2_conv_sf2uw_chop", + "llvm.hexagon.F2.conv.sf2w" => "__builtin_HEXAGON_F2_conv_sf2w", + "llvm.hexagon.F2.conv.sf2w.chop" => "__builtin_HEXAGON_F2_conv_sf2w_chop", + "llvm.hexagon.F2.conv.ud2df" => "__builtin_HEXAGON_F2_conv_ud2df", + "llvm.hexagon.F2.conv.ud2sf" => "__builtin_HEXAGON_F2_conv_ud2sf", + "llvm.hexagon.F2.conv.uw2df" => "__builtin_HEXAGON_F2_conv_uw2df", + "llvm.hexagon.F2.conv.uw2sf" => "__builtin_HEXAGON_F2_conv_uw2sf", + "llvm.hexagon.F2.conv.w2df" => "__builtin_HEXAGON_F2_conv_w2df", + "llvm.hexagon.F2.conv.w2sf" => "__builtin_HEXAGON_F2_conv_w2sf", + "llvm.hexagon.F2.dfadd" => "__builtin_HEXAGON_F2_dfadd", + "llvm.hexagon.F2.dfclass" => "__builtin_HEXAGON_F2_dfclass", + "llvm.hexagon.F2.dfcmpeq" => "__builtin_HEXAGON_F2_dfcmpeq", + "llvm.hexagon.F2.dfcmpge" => "__builtin_HEXAGON_F2_dfcmpge", + "llvm.hexagon.F2.dfcmpgt" => "__builtin_HEXAGON_F2_dfcmpgt", + "llvm.hexagon.F2.dfcmpuo" => "__builtin_HEXAGON_F2_dfcmpuo", + "llvm.hexagon.F2.dffixupd" => "__builtin_HEXAGON_F2_dffixupd", + "llvm.hexagon.F2.dffixupn" => "__builtin_HEXAGON_F2_dffixupn", + "llvm.hexagon.F2.dffixupr" => "__builtin_HEXAGON_F2_dffixupr", + "llvm.hexagon.F2.dffma" => "__builtin_HEXAGON_F2_dffma", + "llvm.hexagon.F2.dffma.lib" => "__builtin_HEXAGON_F2_dffma_lib", + "llvm.hexagon.F2.dffma.sc" => "__builtin_HEXAGON_F2_dffma_sc", + "llvm.hexagon.F2.dffms" => "__builtin_HEXAGON_F2_dffms", + "llvm.hexagon.F2.dffms.lib" => "__builtin_HEXAGON_F2_dffms_lib", + "llvm.hexagon.F2.dfimm.n" => "__builtin_HEXAGON_F2_dfimm_n", + "llvm.hexagon.F2.dfimm.p" => "__builtin_HEXAGON_F2_dfimm_p", + 
"llvm.hexagon.F2.dfmax" => "__builtin_HEXAGON_F2_dfmax", + "llvm.hexagon.F2.dfmin" => "__builtin_HEXAGON_F2_dfmin", + "llvm.hexagon.F2.dfmpy" => "__builtin_HEXAGON_F2_dfmpy", + "llvm.hexagon.F2.dfmpyfix" => "__builtin_HEXAGON_F2_dfmpyfix", + "llvm.hexagon.F2.dfmpyhh" => "__builtin_HEXAGON_F2_dfmpyhh", + "llvm.hexagon.F2.dfmpylh" => "__builtin_HEXAGON_F2_dfmpylh", + "llvm.hexagon.F2.dfmpyll" => "__builtin_HEXAGON_F2_dfmpyll", + "llvm.hexagon.F2.dfsub" => "__builtin_HEXAGON_F2_dfsub", + "llvm.hexagon.F2.sfadd" => "__builtin_HEXAGON_F2_sfadd", + "llvm.hexagon.F2.sfclass" => "__builtin_HEXAGON_F2_sfclass", + "llvm.hexagon.F2.sfcmpeq" => "__builtin_HEXAGON_F2_sfcmpeq", + "llvm.hexagon.F2.sfcmpge" => "__builtin_HEXAGON_F2_sfcmpge", + "llvm.hexagon.F2.sfcmpgt" => "__builtin_HEXAGON_F2_sfcmpgt", + "llvm.hexagon.F2.sfcmpuo" => "__builtin_HEXAGON_F2_sfcmpuo", + "llvm.hexagon.F2.sffixupd" => "__builtin_HEXAGON_F2_sffixupd", + "llvm.hexagon.F2.sffixupn" => "__builtin_HEXAGON_F2_sffixupn", + "llvm.hexagon.F2.sffixupr" => "__builtin_HEXAGON_F2_sffixupr", + "llvm.hexagon.F2.sffma" => "__builtin_HEXAGON_F2_sffma", + "llvm.hexagon.F2.sffma.lib" => "__builtin_HEXAGON_F2_sffma_lib", + "llvm.hexagon.F2.sffma.sc" => "__builtin_HEXAGON_F2_sffma_sc", + "llvm.hexagon.F2.sffms" => "__builtin_HEXAGON_F2_sffms", + "llvm.hexagon.F2.sffms.lib" => "__builtin_HEXAGON_F2_sffms_lib", + "llvm.hexagon.F2.sfimm.n" => "__builtin_HEXAGON_F2_sfimm_n", + "llvm.hexagon.F2.sfimm.p" => "__builtin_HEXAGON_F2_sfimm_p", + "llvm.hexagon.F2.sfmax" => "__builtin_HEXAGON_F2_sfmax", + "llvm.hexagon.F2.sfmin" => "__builtin_HEXAGON_F2_sfmin", + "llvm.hexagon.F2.sfmpy" => "__builtin_HEXAGON_F2_sfmpy", + "llvm.hexagon.F2.sfsub" => "__builtin_HEXAGON_F2_sfsub", + "llvm.hexagon.L2.loadw.locked" => "__builtin_HEXAGON_L2_loadw_locked", + "llvm.hexagon.L4.loadd.locked" => "__builtin__HEXAGON_L4_loadd_locked", + "llvm.hexagon.M2.acci" => "__builtin_HEXAGON_M2_acci", + "llvm.hexagon.M2.accii" => "__builtin_HEXAGON_M2_accii", + "llvm.hexagon.M2.cmaci.s0" => "__builtin_HEXAGON_M2_cmaci_s0", + "llvm.hexagon.M2.cmacr.s0" => "__builtin_HEXAGON_M2_cmacr_s0", + "llvm.hexagon.M2.cmacs.s0" => "__builtin_HEXAGON_M2_cmacs_s0", + "llvm.hexagon.M2.cmacs.s1" => "__builtin_HEXAGON_M2_cmacs_s1", + "llvm.hexagon.M2.cmacsc.s0" => "__builtin_HEXAGON_M2_cmacsc_s0", + "llvm.hexagon.M2.cmacsc.s1" => "__builtin_HEXAGON_M2_cmacsc_s1", + "llvm.hexagon.M2.cmpyi.s0" => "__builtin_HEXAGON_M2_cmpyi_s0", + "llvm.hexagon.M2.cmpyr.s0" => "__builtin_HEXAGON_M2_cmpyr_s0", + "llvm.hexagon.M2.cmpyrs.s0" => "__builtin_HEXAGON_M2_cmpyrs_s0", + "llvm.hexagon.M2.cmpyrs.s1" => "__builtin_HEXAGON_M2_cmpyrs_s1", + "llvm.hexagon.M2.cmpyrsc.s0" => "__builtin_HEXAGON_M2_cmpyrsc_s0", + "llvm.hexagon.M2.cmpyrsc.s1" => "__builtin_HEXAGON_M2_cmpyrsc_s1", + "llvm.hexagon.M2.cmpys.s0" => "__builtin_HEXAGON_M2_cmpys_s0", + "llvm.hexagon.M2.cmpys.s1" => "__builtin_HEXAGON_M2_cmpys_s1", + "llvm.hexagon.M2.cmpysc.s0" => "__builtin_HEXAGON_M2_cmpysc_s0", + "llvm.hexagon.M2.cmpysc.s1" => "__builtin_HEXAGON_M2_cmpysc_s1", + "llvm.hexagon.M2.cnacs.s0" => "__builtin_HEXAGON_M2_cnacs_s0", + "llvm.hexagon.M2.cnacs.s1" => "__builtin_HEXAGON_M2_cnacs_s1", + "llvm.hexagon.M2.cnacsc.s0" => "__builtin_HEXAGON_M2_cnacsc_s0", + "llvm.hexagon.M2.cnacsc.s1" => "__builtin_HEXAGON_M2_cnacsc_s1", + "llvm.hexagon.M2.dpmpyss.acc.s0" => "__builtin_HEXAGON_M2_dpmpyss_acc_s0", + "llvm.hexagon.M2.dpmpyss.nac.s0" => "__builtin_HEXAGON_M2_dpmpyss_nac_s0", + "llvm.hexagon.M2.dpmpyss.rnd.s0" => "__builtin_HEXAGON_M2_dpmpyss_rnd_s0", 
+ "llvm.hexagon.M2.dpmpyss.s0" => "__builtin_HEXAGON_M2_dpmpyss_s0", + "llvm.hexagon.M2.dpmpyuu.acc.s0" => "__builtin_HEXAGON_M2_dpmpyuu_acc_s0", + "llvm.hexagon.M2.dpmpyuu.nac.s0" => "__builtin_HEXAGON_M2_dpmpyuu_nac_s0", + "llvm.hexagon.M2.dpmpyuu.s0" => "__builtin_HEXAGON_M2_dpmpyuu_s0", + "llvm.hexagon.M2.hmmpyh.rs1" => "__builtin_HEXAGON_M2_hmmpyh_rs1", + "llvm.hexagon.M2.hmmpyh.s1" => "__builtin_HEXAGON_M2_hmmpyh_s1", + "llvm.hexagon.M2.hmmpyl.rs1" => "__builtin_HEXAGON_M2_hmmpyl_rs1", + "llvm.hexagon.M2.hmmpyl.s1" => "__builtin_HEXAGON_M2_hmmpyl_s1", + "llvm.hexagon.M2.maci" => "__builtin_HEXAGON_M2_maci", + "llvm.hexagon.M2.macsin" => "__builtin_HEXAGON_M2_macsin", + "llvm.hexagon.M2.macsip" => "__builtin_HEXAGON_M2_macsip", + "llvm.hexagon.M2.mmachs.rs0" => "__builtin_HEXAGON_M2_mmachs_rs0", + "llvm.hexagon.M2.mmachs.rs1" => "__builtin_HEXAGON_M2_mmachs_rs1", + "llvm.hexagon.M2.mmachs.s0" => "__builtin_HEXAGON_M2_mmachs_s0", + "llvm.hexagon.M2.mmachs.s1" => "__builtin_HEXAGON_M2_mmachs_s1", + "llvm.hexagon.M2.mmacls.rs0" => "__builtin_HEXAGON_M2_mmacls_rs0", + "llvm.hexagon.M2.mmacls.rs1" => "__builtin_HEXAGON_M2_mmacls_rs1", + "llvm.hexagon.M2.mmacls.s0" => "__builtin_HEXAGON_M2_mmacls_s0", + "llvm.hexagon.M2.mmacls.s1" => "__builtin_HEXAGON_M2_mmacls_s1", + "llvm.hexagon.M2.mmacuhs.rs0" => "__builtin_HEXAGON_M2_mmacuhs_rs0", + "llvm.hexagon.M2.mmacuhs.rs1" => "__builtin_HEXAGON_M2_mmacuhs_rs1", + "llvm.hexagon.M2.mmacuhs.s0" => "__builtin_HEXAGON_M2_mmacuhs_s0", + "llvm.hexagon.M2.mmacuhs.s1" => "__builtin_HEXAGON_M2_mmacuhs_s1", + "llvm.hexagon.M2.mmaculs.rs0" => "__builtin_HEXAGON_M2_mmaculs_rs0", + "llvm.hexagon.M2.mmaculs.rs1" => "__builtin_HEXAGON_M2_mmaculs_rs1", + "llvm.hexagon.M2.mmaculs.s0" => "__builtin_HEXAGON_M2_mmaculs_s0", + "llvm.hexagon.M2.mmaculs.s1" => "__builtin_HEXAGON_M2_mmaculs_s1", + "llvm.hexagon.M2.mmpyh.rs0" => "__builtin_HEXAGON_M2_mmpyh_rs0", + "llvm.hexagon.M2.mmpyh.rs1" => "__builtin_HEXAGON_M2_mmpyh_rs1", + "llvm.hexagon.M2.mmpyh.s0" => "__builtin_HEXAGON_M2_mmpyh_s0", + "llvm.hexagon.M2.mmpyh.s1" => "__builtin_HEXAGON_M2_mmpyh_s1", + "llvm.hexagon.M2.mmpyl.rs0" => "__builtin_HEXAGON_M2_mmpyl_rs0", + "llvm.hexagon.M2.mmpyl.rs1" => "__builtin_HEXAGON_M2_mmpyl_rs1", + "llvm.hexagon.M2.mmpyl.s0" => "__builtin_HEXAGON_M2_mmpyl_s0", + "llvm.hexagon.M2.mmpyl.s1" => "__builtin_HEXAGON_M2_mmpyl_s1", + "llvm.hexagon.M2.mmpyuh.rs0" => "__builtin_HEXAGON_M2_mmpyuh_rs0", + "llvm.hexagon.M2.mmpyuh.rs1" => "__builtin_HEXAGON_M2_mmpyuh_rs1", + "llvm.hexagon.M2.mmpyuh.s0" => "__builtin_HEXAGON_M2_mmpyuh_s0", + "llvm.hexagon.M2.mmpyuh.s1" => "__builtin_HEXAGON_M2_mmpyuh_s1", + "llvm.hexagon.M2.mmpyul.rs0" => "__builtin_HEXAGON_M2_mmpyul_rs0", + "llvm.hexagon.M2.mmpyul.rs1" => "__builtin_HEXAGON_M2_mmpyul_rs1", + "llvm.hexagon.M2.mmpyul.s0" => "__builtin_HEXAGON_M2_mmpyul_s0", + "llvm.hexagon.M2.mmpyul.s1" => "__builtin_HEXAGON_M2_mmpyul_s1", + "llvm.hexagon.M2.mnaci" => "__builtin_HEXAGON_M2_mnaci", + "llvm.hexagon.M2.mpy.acc.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_hh_s0", + "llvm.hexagon.M2.mpy.acc.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_hh_s1", + "llvm.hexagon.M2.mpy.acc.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_hl_s0", + "llvm.hexagon.M2.mpy.acc.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_hl_s1", + "llvm.hexagon.M2.mpy.acc.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_lh_s0", + "llvm.hexagon.M2.mpy.acc.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_lh_s1", + "llvm.hexagon.M2.mpy.acc.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_ll_s0", + "llvm.hexagon.M2.mpy.acc.ll.s1" => 
"__builtin_HEXAGON_M2_mpy_acc_ll_s1", + "llvm.hexagon.M2.mpy.acc.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0", + "llvm.hexagon.M2.mpy.acc.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1", + "llvm.hexagon.M2.mpy.acc.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0", + "llvm.hexagon.M2.mpy.acc.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1", + "llvm.hexagon.M2.mpy.acc.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0", + "llvm.hexagon.M2.mpy.acc.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1", + "llvm.hexagon.M2.mpy.acc.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0", + "llvm.hexagon.M2.mpy.acc.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1", + "llvm.hexagon.M2.mpy.hh.s0" => "__builtin_HEXAGON_M2_mpy_hh_s0", + "llvm.hexagon.M2.mpy.hh.s1" => "__builtin_HEXAGON_M2_mpy_hh_s1", + "llvm.hexagon.M2.mpy.hl.s0" => "__builtin_HEXAGON_M2_mpy_hl_s0", + "llvm.hexagon.M2.mpy.hl.s1" => "__builtin_HEXAGON_M2_mpy_hl_s1", + "llvm.hexagon.M2.mpy.lh.s0" => "__builtin_HEXAGON_M2_mpy_lh_s0", + "llvm.hexagon.M2.mpy.lh.s1" => "__builtin_HEXAGON_M2_mpy_lh_s1", + "llvm.hexagon.M2.mpy.ll.s0" => "__builtin_HEXAGON_M2_mpy_ll_s0", + "llvm.hexagon.M2.mpy.ll.s1" => "__builtin_HEXAGON_M2_mpy_ll_s1", + "llvm.hexagon.M2.mpy.nac.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_hh_s0", + "llvm.hexagon.M2.mpy.nac.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_hh_s1", + "llvm.hexagon.M2.mpy.nac.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_hl_s0", + "llvm.hexagon.M2.mpy.nac.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_hl_s1", + "llvm.hexagon.M2.mpy.nac.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_lh_s0", + "llvm.hexagon.M2.mpy.nac.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_lh_s1", + "llvm.hexagon.M2.mpy.nac.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_ll_s0", + "llvm.hexagon.M2.mpy.nac.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_ll_s1", + "llvm.hexagon.M2.mpy.nac.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0", + "llvm.hexagon.M2.mpy.nac.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1", + "llvm.hexagon.M2.mpy.nac.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0", + "llvm.hexagon.M2.mpy.nac.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1", + "llvm.hexagon.M2.mpy.nac.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0", + "llvm.hexagon.M2.mpy.nac.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1", + "llvm.hexagon.M2.mpy.nac.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0", + "llvm.hexagon.M2.mpy.nac.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1", + "llvm.hexagon.M2.mpy.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s0", + "llvm.hexagon.M2.mpy.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s1", + "llvm.hexagon.M2.mpy.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s0", + "llvm.hexagon.M2.mpy.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s1", + "llvm.hexagon.M2.mpy.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s0", + "llvm.hexagon.M2.mpy.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s1", + "llvm.hexagon.M2.mpy.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s0", + "llvm.hexagon.M2.mpy.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s1", + "llvm.hexagon.M2.mpy.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_hh_s0", + "llvm.hexagon.M2.mpy.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_hh_s1", + "llvm.hexagon.M2.mpy.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_hl_s0", + "llvm.hexagon.M2.mpy.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_hl_s1", + "llvm.hexagon.M2.mpy.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_lh_s0", + "llvm.hexagon.M2.mpy.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_lh_s1", + 
"llvm.hexagon.M2.mpy.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_ll_s0", + "llvm.hexagon.M2.mpy.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_ll_s1", + "llvm.hexagon.M2.mpy.sat.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0", + "llvm.hexagon.M2.mpy.sat.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1", + "llvm.hexagon.M2.mpy.sat.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0", + "llvm.hexagon.M2.mpy.sat.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1", + "llvm.hexagon.M2.mpy.sat.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0", + "llvm.hexagon.M2.mpy.sat.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1", + "llvm.hexagon.M2.mpy.sat.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0", + "llvm.hexagon.M2.mpy.sat.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1", + "llvm.hexagon.M2.mpy.up" => "__builtin_HEXAGON_M2_mpy_up", + "llvm.hexagon.M2.mpy.up.s1" => "__builtin_HEXAGON_M2_mpy_up_s1", + "llvm.hexagon.M2.mpy.up.s1.sat" => "__builtin_HEXAGON_M2_mpy_up_s1_sat", + "llvm.hexagon.M2.mpyd.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s0", + "llvm.hexagon.M2.mpyd.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s1", + "llvm.hexagon.M2.mpyd.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s0", + "llvm.hexagon.M2.mpyd.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s1", + "llvm.hexagon.M2.mpyd.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s0", + "llvm.hexagon.M2.mpyd.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s1", + "llvm.hexagon.M2.mpyd.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s0", + "llvm.hexagon.M2.mpyd.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s1", + "llvm.hexagon.M2.mpyd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_hh_s0", + "llvm.hexagon.M2.mpyd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_hh_s1", + "llvm.hexagon.M2.mpyd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_hl_s0", + "llvm.hexagon.M2.mpyd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_hl_s1", + "llvm.hexagon.M2.mpyd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_lh_s0", + "llvm.hexagon.M2.mpyd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_lh_s1", + "llvm.hexagon.M2.mpyd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_ll_s0", + "llvm.hexagon.M2.mpyd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_ll_s1", + "llvm.hexagon.M2.mpyd.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s0", + "llvm.hexagon.M2.mpyd.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s1", + "llvm.hexagon.M2.mpyd.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s0", + "llvm.hexagon.M2.mpyd.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s1", + "llvm.hexagon.M2.mpyd.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s0", + "llvm.hexagon.M2.mpyd.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s1", + "llvm.hexagon.M2.mpyd.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s0", + "llvm.hexagon.M2.mpyd.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s1", + "llvm.hexagon.M2.mpyd.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s0", + "llvm.hexagon.M2.mpyd.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s1", + "llvm.hexagon.M2.mpyd.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s0", + "llvm.hexagon.M2.mpyd.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s1", + "llvm.hexagon.M2.mpyd.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s0", + "llvm.hexagon.M2.mpyd.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s1", + "llvm.hexagon.M2.mpyd.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s0", + "llvm.hexagon.M2.mpyd.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s1", + "llvm.hexagon.M2.mpyi" => "__builtin_HEXAGON_M2_mpyi", + "llvm.hexagon.M2.mpysmi" => "__builtin_HEXAGON_M2_mpysmi", + 
"llvm.hexagon.M2.mpysu.up" => "__builtin_HEXAGON_M2_mpysu_up", + "llvm.hexagon.M2.mpyu.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s0", + "llvm.hexagon.M2.mpyu.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s1", + "llvm.hexagon.M2.mpyu.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s0", + "llvm.hexagon.M2.mpyu.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s1", + "llvm.hexagon.M2.mpyu.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s0", + "llvm.hexagon.M2.mpyu.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s1", + "llvm.hexagon.M2.mpyu.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s0", + "llvm.hexagon.M2.mpyu.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s1", + "llvm.hexagon.M2.mpyu.hh.s0" => "__builtin_HEXAGON_M2_mpyu_hh_s0", + "llvm.hexagon.M2.mpyu.hh.s1" => "__builtin_HEXAGON_M2_mpyu_hh_s1", + "llvm.hexagon.M2.mpyu.hl.s0" => "__builtin_HEXAGON_M2_mpyu_hl_s0", + "llvm.hexagon.M2.mpyu.hl.s1" => "__builtin_HEXAGON_M2_mpyu_hl_s1", + "llvm.hexagon.M2.mpyu.lh.s0" => "__builtin_HEXAGON_M2_mpyu_lh_s0", + "llvm.hexagon.M2.mpyu.lh.s1" => "__builtin_HEXAGON_M2_mpyu_lh_s1", + "llvm.hexagon.M2.mpyu.ll.s0" => "__builtin_HEXAGON_M2_mpyu_ll_s0", + "llvm.hexagon.M2.mpyu.ll.s1" => "__builtin_HEXAGON_M2_mpyu_ll_s1", + "llvm.hexagon.M2.mpyu.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s0", + "llvm.hexagon.M2.mpyu.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s1", + "llvm.hexagon.M2.mpyu.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s0", + "llvm.hexagon.M2.mpyu.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s1", + "llvm.hexagon.M2.mpyu.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s0", + "llvm.hexagon.M2.mpyu.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s1", + "llvm.hexagon.M2.mpyu.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s0", + "llvm.hexagon.M2.mpyu.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s1", + "llvm.hexagon.M2.mpyu.up" => "__builtin_HEXAGON_M2_mpyu_up", + "llvm.hexagon.M2.mpyud.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s0", + "llvm.hexagon.M2.mpyud.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s1", + "llvm.hexagon.M2.mpyud.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s0", + "llvm.hexagon.M2.mpyud.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s1", + "llvm.hexagon.M2.mpyud.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s0", + "llvm.hexagon.M2.mpyud.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s1", + "llvm.hexagon.M2.mpyud.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s0", + "llvm.hexagon.M2.mpyud.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s1", + "llvm.hexagon.M2.mpyud.hh.s0" => "__builtin_HEXAGON_M2_mpyud_hh_s0", + "llvm.hexagon.M2.mpyud.hh.s1" => "__builtin_HEXAGON_M2_mpyud_hh_s1", + "llvm.hexagon.M2.mpyud.hl.s0" => "__builtin_HEXAGON_M2_mpyud_hl_s0", + "llvm.hexagon.M2.mpyud.hl.s1" => "__builtin_HEXAGON_M2_mpyud_hl_s1", + "llvm.hexagon.M2.mpyud.lh.s0" => "__builtin_HEXAGON_M2_mpyud_lh_s0", + "llvm.hexagon.M2.mpyud.lh.s1" => "__builtin_HEXAGON_M2_mpyud_lh_s1", + "llvm.hexagon.M2.mpyud.ll.s0" => "__builtin_HEXAGON_M2_mpyud_ll_s0", + "llvm.hexagon.M2.mpyud.ll.s1" => "__builtin_HEXAGON_M2_mpyud_ll_s1", + "llvm.hexagon.M2.mpyud.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s0", + "llvm.hexagon.M2.mpyud.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s1", + "llvm.hexagon.M2.mpyud.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s0", + "llvm.hexagon.M2.mpyud.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s1", + "llvm.hexagon.M2.mpyud.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s0", + "llvm.hexagon.M2.mpyud.nac.lh.s1" => 
"__builtin_HEXAGON_M2_mpyud_nac_lh_s1", + "llvm.hexagon.M2.mpyud.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s0", + "llvm.hexagon.M2.mpyud.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s1", + "llvm.hexagon.M2.mpyui" => "__builtin_HEXAGON_M2_mpyui", + "llvm.hexagon.M2.nacci" => "__builtin_HEXAGON_M2_nacci", + "llvm.hexagon.M2.naccii" => "__builtin_HEXAGON_M2_naccii", + "llvm.hexagon.M2.subacc" => "__builtin_HEXAGON_M2_subacc", + "llvm.hexagon.M2.vabsdiffh" => "__builtin_HEXAGON_M2_vabsdiffh", + "llvm.hexagon.M2.vabsdiffw" => "__builtin_HEXAGON_M2_vabsdiffw", + "llvm.hexagon.M2.vcmac.s0.sat.i" => "__builtin_HEXAGON_M2_vcmac_s0_sat_i", + "llvm.hexagon.M2.vcmac.s0.sat.r" => "__builtin_HEXAGON_M2_vcmac_s0_sat_r", + "llvm.hexagon.M2.vcmpy.s0.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_i", + "llvm.hexagon.M2.vcmpy.s0.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_r", + "llvm.hexagon.M2.vcmpy.s1.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_i", + "llvm.hexagon.M2.vcmpy.s1.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_r", + "llvm.hexagon.M2.vdmacs.s0" => "__builtin_HEXAGON_M2_vdmacs_s0", + "llvm.hexagon.M2.vdmacs.s1" => "__builtin_HEXAGON_M2_vdmacs_s1", + "llvm.hexagon.M2.vdmpyrs.s0" => "__builtin_HEXAGON_M2_vdmpyrs_s0", + "llvm.hexagon.M2.vdmpyrs.s1" => "__builtin_HEXAGON_M2_vdmpyrs_s1", + "llvm.hexagon.M2.vdmpys.s0" => "__builtin_HEXAGON_M2_vdmpys_s0", + "llvm.hexagon.M2.vdmpys.s1" => "__builtin_HEXAGON_M2_vdmpys_s1", + "llvm.hexagon.M2.vmac2" => "__builtin_HEXAGON_M2_vmac2", + "llvm.hexagon.M2.vmac2es" => "__builtin_HEXAGON_M2_vmac2es", + "llvm.hexagon.M2.vmac2es.s0" => "__builtin_HEXAGON_M2_vmac2es_s0", + "llvm.hexagon.M2.vmac2es.s1" => "__builtin_HEXAGON_M2_vmac2es_s1", + "llvm.hexagon.M2.vmac2s.s0" => "__builtin_HEXAGON_M2_vmac2s_s0", + "llvm.hexagon.M2.vmac2s.s1" => "__builtin_HEXAGON_M2_vmac2s_s1", + "llvm.hexagon.M2.vmac2su.s0" => "__builtin_HEXAGON_M2_vmac2su_s0", + "llvm.hexagon.M2.vmac2su.s1" => "__builtin_HEXAGON_M2_vmac2su_s1", + "llvm.hexagon.M2.vmpy2es.s0" => "__builtin_HEXAGON_M2_vmpy2es_s0", + "llvm.hexagon.M2.vmpy2es.s1" => "__builtin_HEXAGON_M2_vmpy2es_s1", + "llvm.hexagon.M2.vmpy2s.s0" => "__builtin_HEXAGON_M2_vmpy2s_s0", + "llvm.hexagon.M2.vmpy2s.s0pack" => "__builtin_HEXAGON_M2_vmpy2s_s0pack", + "llvm.hexagon.M2.vmpy2s.s1" => "__builtin_HEXAGON_M2_vmpy2s_s1", + "llvm.hexagon.M2.vmpy2s.s1pack" => "__builtin_HEXAGON_M2_vmpy2s_s1pack", + "llvm.hexagon.M2.vmpy2su.s0" => "__builtin_HEXAGON_M2_vmpy2su_s0", + "llvm.hexagon.M2.vmpy2su.s1" => "__builtin_HEXAGON_M2_vmpy2su_s1", + "llvm.hexagon.M2.vraddh" => "__builtin_HEXAGON_M2_vraddh", + "llvm.hexagon.M2.vradduh" => "__builtin_HEXAGON_M2_vradduh", + "llvm.hexagon.M2.vrcmaci.s0" => "__builtin_HEXAGON_M2_vrcmaci_s0", + "llvm.hexagon.M2.vrcmaci.s0c" => "__builtin_HEXAGON_M2_vrcmaci_s0c", + "llvm.hexagon.M2.vrcmacr.s0" => "__builtin_HEXAGON_M2_vrcmacr_s0", + "llvm.hexagon.M2.vrcmacr.s0c" => "__builtin_HEXAGON_M2_vrcmacr_s0c", + "llvm.hexagon.M2.vrcmpyi.s0" => "__builtin_HEXAGON_M2_vrcmpyi_s0", + "llvm.hexagon.M2.vrcmpyi.s0c" => "__builtin_HEXAGON_M2_vrcmpyi_s0c", + "llvm.hexagon.M2.vrcmpyr.s0" => "__builtin_HEXAGON_M2_vrcmpyr_s0", + "llvm.hexagon.M2.vrcmpyr.s0c" => "__builtin_HEXAGON_M2_vrcmpyr_s0c", + "llvm.hexagon.M2.vrcmpys.acc.s1" => "__builtin_HEXAGON_M2_vrcmpys_acc_s1", + "llvm.hexagon.M2.vrcmpys.s1" => "__builtin_HEXAGON_M2_vrcmpys_s1", + "llvm.hexagon.M2.vrcmpys.s1rp" => "__builtin_HEXAGON_M2_vrcmpys_s1rp", + "llvm.hexagon.M2.vrmac.s0" => "__builtin_HEXAGON_M2_vrmac_s0", + "llvm.hexagon.M2.vrmpy.s0" => 
"__builtin_HEXAGON_M2_vrmpy_s0", + "llvm.hexagon.M2.xor.xacc" => "__builtin_HEXAGON_M2_xor_xacc", + "llvm.hexagon.M4.and.and" => "__builtin_HEXAGON_M4_and_and", + "llvm.hexagon.M4.and.andn" => "__builtin_HEXAGON_M4_and_andn", + "llvm.hexagon.M4.and.or" => "__builtin_HEXAGON_M4_and_or", + "llvm.hexagon.M4.and.xor" => "__builtin_HEXAGON_M4_and_xor", + "llvm.hexagon.M4.cmpyi.wh" => "__builtin_HEXAGON_M4_cmpyi_wh", + "llvm.hexagon.M4.cmpyi.whc" => "__builtin_HEXAGON_M4_cmpyi_whc", + "llvm.hexagon.M4.cmpyr.wh" => "__builtin_HEXAGON_M4_cmpyr_wh", + "llvm.hexagon.M4.cmpyr.whc" => "__builtin_HEXAGON_M4_cmpyr_whc", + "llvm.hexagon.M4.mac.up.s1.sat" => "__builtin_HEXAGON_M4_mac_up_s1_sat", + "llvm.hexagon.M4.mpyri.addi" => "__builtin_HEXAGON_M4_mpyri_addi", + "llvm.hexagon.M4.mpyri.addr" => "__builtin_HEXAGON_M4_mpyri_addr", + "llvm.hexagon.M4.mpyri.addr.u2" => "__builtin_HEXAGON_M4_mpyri_addr_u2", + "llvm.hexagon.M4.mpyrr.addi" => "__builtin_HEXAGON_M4_mpyrr_addi", + "llvm.hexagon.M4.mpyrr.addr" => "__builtin_HEXAGON_M4_mpyrr_addr", + "llvm.hexagon.M4.nac.up.s1.sat" => "__builtin_HEXAGON_M4_nac_up_s1_sat", + "llvm.hexagon.M4.or.and" => "__builtin_HEXAGON_M4_or_and", + "llvm.hexagon.M4.or.andn" => "__builtin_HEXAGON_M4_or_andn", + "llvm.hexagon.M4.or.or" => "__builtin_HEXAGON_M4_or_or", + "llvm.hexagon.M4.or.xor" => "__builtin_HEXAGON_M4_or_xor", + "llvm.hexagon.M4.pmpyw" => "__builtin_HEXAGON_M4_pmpyw", + "llvm.hexagon.M4.pmpyw.acc" => "__builtin_HEXAGON_M4_pmpyw_acc", + "llvm.hexagon.M4.vpmpyh" => "__builtin_HEXAGON_M4_vpmpyh", + "llvm.hexagon.M4.vpmpyh.acc" => "__builtin_HEXAGON_M4_vpmpyh_acc", + "llvm.hexagon.M4.vrmpyeh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s0", + "llvm.hexagon.M4.vrmpyeh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s1", + "llvm.hexagon.M4.vrmpyeh.s0" => "__builtin_HEXAGON_M4_vrmpyeh_s0", + "llvm.hexagon.M4.vrmpyeh.s1" => "__builtin_HEXAGON_M4_vrmpyeh_s1", + "llvm.hexagon.M4.vrmpyoh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s0", + "llvm.hexagon.M4.vrmpyoh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s1", + "llvm.hexagon.M4.vrmpyoh.s0" => "__builtin_HEXAGON_M4_vrmpyoh_s0", + "llvm.hexagon.M4.vrmpyoh.s1" => "__builtin_HEXAGON_M4_vrmpyoh_s1", + "llvm.hexagon.M4.xor.and" => "__builtin_HEXAGON_M4_xor_and", + "llvm.hexagon.M4.xor.andn" => "__builtin_HEXAGON_M4_xor_andn", + "llvm.hexagon.M4.xor.or" => "__builtin_HEXAGON_M4_xor_or", + "llvm.hexagon.M4.xor.xacc" => "__builtin_HEXAGON_M4_xor_xacc", + "llvm.hexagon.M5.vdmacbsu" => "__builtin_HEXAGON_M5_vdmacbsu", + "llvm.hexagon.M5.vdmpybsu" => "__builtin_HEXAGON_M5_vdmpybsu", + "llvm.hexagon.M5.vmacbsu" => "__builtin_HEXAGON_M5_vmacbsu", + "llvm.hexagon.M5.vmacbuu" => "__builtin_HEXAGON_M5_vmacbuu", + "llvm.hexagon.M5.vmpybsu" => "__builtin_HEXAGON_M5_vmpybsu", + "llvm.hexagon.M5.vmpybuu" => "__builtin_HEXAGON_M5_vmpybuu", + "llvm.hexagon.M5.vrmacbsu" => "__builtin_HEXAGON_M5_vrmacbsu", + "llvm.hexagon.M5.vrmacbuu" => "__builtin_HEXAGON_M5_vrmacbuu", + "llvm.hexagon.M5.vrmpybsu" => "__builtin_HEXAGON_M5_vrmpybsu", + "llvm.hexagon.M5.vrmpybuu" => "__builtin_HEXAGON_M5_vrmpybuu", + "llvm.hexagon.M6.vabsdiffb" => "__builtin_HEXAGON_M6_vabsdiffb", + "llvm.hexagon.M6.vabsdiffub" => "__builtin_HEXAGON_M6_vabsdiffub", + "llvm.hexagon.M7.dcmpyiw" => "__builtin_HEXAGON_M7_dcmpyiw", + "llvm.hexagon.M7.dcmpyiw.acc" => "__builtin_HEXAGON_M7_dcmpyiw_acc", + "llvm.hexagon.M7.dcmpyiwc" => "__builtin_HEXAGON_M7_dcmpyiwc", + "llvm.hexagon.M7.dcmpyiwc.acc" => "__builtin_HEXAGON_M7_dcmpyiwc_acc", + "llvm.hexagon.M7.dcmpyrw" => 
"__builtin_HEXAGON_M7_dcmpyrw", + "llvm.hexagon.M7.dcmpyrw.acc" => "__builtin_HEXAGON_M7_dcmpyrw_acc", + "llvm.hexagon.M7.dcmpyrwc" => "__builtin_HEXAGON_M7_dcmpyrwc", + "llvm.hexagon.M7.dcmpyrwc.acc" => "__builtin_HEXAGON_M7_dcmpyrwc_acc", + "llvm.hexagon.M7.vdmpy" => "__builtin_HEXAGON_M7_vdmpy", + "llvm.hexagon.M7.vdmpy.acc" => "__builtin_HEXAGON_M7_vdmpy_acc", + "llvm.hexagon.M7.wcmpyiw" => "__builtin_HEXAGON_M7_wcmpyiw", + "llvm.hexagon.M7.wcmpyiw.rnd" => "__builtin_HEXAGON_M7_wcmpyiw_rnd", + "llvm.hexagon.M7.wcmpyiwc" => "__builtin_HEXAGON_M7_wcmpyiwc", + "llvm.hexagon.M7.wcmpyiwc.rnd" => "__builtin_HEXAGON_M7_wcmpyiwc_rnd", + "llvm.hexagon.M7.wcmpyrw" => "__builtin_HEXAGON_M7_wcmpyrw", + "llvm.hexagon.M7.wcmpyrw.rnd" => "__builtin_HEXAGON_M7_wcmpyrw_rnd", + "llvm.hexagon.M7.wcmpyrwc" => "__builtin_HEXAGON_M7_wcmpyrwc", + "llvm.hexagon.M7.wcmpyrwc.rnd" => "__builtin_HEXAGON_M7_wcmpyrwc_rnd", + "llvm.hexagon.S2.addasl.rrri" => "__builtin_HEXAGON_S2_addasl_rrri", + "llvm.hexagon.S2.asl.i.p" => "__builtin_HEXAGON_S2_asl_i_p", + "llvm.hexagon.S2.asl.i.p.acc" => "__builtin_HEXAGON_S2_asl_i_p_acc", + "llvm.hexagon.S2.asl.i.p.and" => "__builtin_HEXAGON_S2_asl_i_p_and", + "llvm.hexagon.S2.asl.i.p.nac" => "__builtin_HEXAGON_S2_asl_i_p_nac", + "llvm.hexagon.S2.asl.i.p.or" => "__builtin_HEXAGON_S2_asl_i_p_or", + "llvm.hexagon.S2.asl.i.p.xacc" => "__builtin_HEXAGON_S2_asl_i_p_xacc", + "llvm.hexagon.S2.asl.i.r" => "__builtin_HEXAGON_S2_asl_i_r", + "llvm.hexagon.S2.asl.i.r.acc" => "__builtin_HEXAGON_S2_asl_i_r_acc", + "llvm.hexagon.S2.asl.i.r.and" => "__builtin_HEXAGON_S2_asl_i_r_and", + "llvm.hexagon.S2.asl.i.r.nac" => "__builtin_HEXAGON_S2_asl_i_r_nac", + "llvm.hexagon.S2.asl.i.r.or" => "__builtin_HEXAGON_S2_asl_i_r_or", + "llvm.hexagon.S2.asl.i.r.sat" => "__builtin_HEXAGON_S2_asl_i_r_sat", + "llvm.hexagon.S2.asl.i.r.xacc" => "__builtin_HEXAGON_S2_asl_i_r_xacc", + "llvm.hexagon.S2.asl.i.vh" => "__builtin_HEXAGON_S2_asl_i_vh", + "llvm.hexagon.S2.asl.i.vw" => "__builtin_HEXAGON_S2_asl_i_vw", + "llvm.hexagon.S2.asl.r.p" => "__builtin_HEXAGON_S2_asl_r_p", + "llvm.hexagon.S2.asl.r.p.acc" => "__builtin_HEXAGON_S2_asl_r_p_acc", + "llvm.hexagon.S2.asl.r.p.and" => "__builtin_HEXAGON_S2_asl_r_p_and", + "llvm.hexagon.S2.asl.r.p.nac" => "__builtin_HEXAGON_S2_asl_r_p_nac", + "llvm.hexagon.S2.asl.r.p.or" => "__builtin_HEXAGON_S2_asl_r_p_or", + "llvm.hexagon.S2.asl.r.p.xor" => "__builtin_HEXAGON_S2_asl_r_p_xor", + "llvm.hexagon.S2.asl.r.r" => "__builtin_HEXAGON_S2_asl_r_r", + "llvm.hexagon.S2.asl.r.r.acc" => "__builtin_HEXAGON_S2_asl_r_r_acc", + "llvm.hexagon.S2.asl.r.r.and" => "__builtin_HEXAGON_S2_asl_r_r_and", + "llvm.hexagon.S2.asl.r.r.nac" => "__builtin_HEXAGON_S2_asl_r_r_nac", + "llvm.hexagon.S2.asl.r.r.or" => "__builtin_HEXAGON_S2_asl_r_r_or", + "llvm.hexagon.S2.asl.r.r.sat" => "__builtin_HEXAGON_S2_asl_r_r_sat", + "llvm.hexagon.S2.asl.r.vh" => "__builtin_HEXAGON_S2_asl_r_vh", + "llvm.hexagon.S2.asl.r.vw" => "__builtin_HEXAGON_S2_asl_r_vw", + "llvm.hexagon.S2.asr.i.p" => "__builtin_HEXAGON_S2_asr_i_p", + "llvm.hexagon.S2.asr.i.p.acc" => "__builtin_HEXAGON_S2_asr_i_p_acc", + "llvm.hexagon.S2.asr.i.p.and" => "__builtin_HEXAGON_S2_asr_i_p_and", + "llvm.hexagon.S2.asr.i.p.nac" => "__builtin_HEXAGON_S2_asr_i_p_nac", + "llvm.hexagon.S2.asr.i.p.or" => "__builtin_HEXAGON_S2_asr_i_p_or", + "llvm.hexagon.S2.asr.i.p.rnd" => "__builtin_HEXAGON_S2_asr_i_p_rnd", + "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax", + "llvm.hexagon.S2.asr.i.r" => "__builtin_HEXAGON_S2_asr_i_r", + 
"llvm.hexagon.S2.asr.i.r.acc" => "__builtin_HEXAGON_S2_asr_i_r_acc", + "llvm.hexagon.S2.asr.i.r.and" => "__builtin_HEXAGON_S2_asr_i_r_and", + "llvm.hexagon.S2.asr.i.r.nac" => "__builtin_HEXAGON_S2_asr_i_r_nac", + "llvm.hexagon.S2.asr.i.r.or" => "__builtin_HEXAGON_S2_asr_i_r_or", + "llvm.hexagon.S2.asr.i.r.rnd" => "__builtin_HEXAGON_S2_asr_i_r_rnd", + "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax", + "llvm.hexagon.S2.asr.i.svw.trun" => "__builtin_HEXAGON_S2_asr_i_svw_trun", + "llvm.hexagon.S2.asr.i.vh" => "__builtin_HEXAGON_S2_asr_i_vh", + "llvm.hexagon.S2.asr.i.vw" => "__builtin_HEXAGON_S2_asr_i_vw", + "llvm.hexagon.S2.asr.r.p" => "__builtin_HEXAGON_S2_asr_r_p", + "llvm.hexagon.S2.asr.r.p.acc" => "__builtin_HEXAGON_S2_asr_r_p_acc", + "llvm.hexagon.S2.asr.r.p.and" => "__builtin_HEXAGON_S2_asr_r_p_and", + "llvm.hexagon.S2.asr.r.p.nac" => "__builtin_HEXAGON_S2_asr_r_p_nac", + "llvm.hexagon.S2.asr.r.p.or" => "__builtin_HEXAGON_S2_asr_r_p_or", + "llvm.hexagon.S2.asr.r.p.xor" => "__builtin_HEXAGON_S2_asr_r_p_xor", + "llvm.hexagon.S2.asr.r.r" => "__builtin_HEXAGON_S2_asr_r_r", + "llvm.hexagon.S2.asr.r.r.acc" => "__builtin_HEXAGON_S2_asr_r_r_acc", + "llvm.hexagon.S2.asr.r.r.and" => "__builtin_HEXAGON_S2_asr_r_r_and", + "llvm.hexagon.S2.asr.r.r.nac" => "__builtin_HEXAGON_S2_asr_r_r_nac", + "llvm.hexagon.S2.asr.r.r.or" => "__builtin_HEXAGON_S2_asr_r_r_or", + "llvm.hexagon.S2.asr.r.r.sat" => "__builtin_HEXAGON_S2_asr_r_r_sat", + "llvm.hexagon.S2.asr.r.svw.trun" => "__builtin_HEXAGON_S2_asr_r_svw_trun", + "llvm.hexagon.S2.asr.r.vh" => "__builtin_HEXAGON_S2_asr_r_vh", + "llvm.hexagon.S2.asr.r.vw" => "__builtin_HEXAGON_S2_asr_r_vw", + "llvm.hexagon.S2.brev" => "__builtin_HEXAGON_S2_brev", + "llvm.hexagon.S2.brevp" => "__builtin_HEXAGON_S2_brevp", + "llvm.hexagon.S2.cabacencbin" => "__builtin_HEXAGON_S2_cabacencbin", + "llvm.hexagon.S2.cl0" => "__builtin_HEXAGON_S2_cl0", + "llvm.hexagon.S2.cl0p" => "__builtin_HEXAGON_S2_cl0p", + "llvm.hexagon.S2.cl1" => "__builtin_HEXAGON_S2_cl1", + "llvm.hexagon.S2.cl1p" => "__builtin_HEXAGON_S2_cl1p", + "llvm.hexagon.S2.clb" => "__builtin_HEXAGON_S2_clb", + "llvm.hexagon.S2.clbnorm" => "__builtin_HEXAGON_S2_clbnorm", + "llvm.hexagon.S2.clbp" => "__builtin_HEXAGON_S2_clbp", + "llvm.hexagon.S2.clrbit.i" => "__builtin_HEXAGON_S2_clrbit_i", + "llvm.hexagon.S2.clrbit.r" => "__builtin_HEXAGON_S2_clrbit_r", + "llvm.hexagon.S2.ct0" => "__builtin_HEXAGON_S2_ct0", + "llvm.hexagon.S2.ct0p" => "__builtin_HEXAGON_S2_ct0p", + "llvm.hexagon.S2.ct1" => "__builtin_HEXAGON_S2_ct1", + "llvm.hexagon.S2.ct1p" => "__builtin_HEXAGON_S2_ct1p", + "llvm.hexagon.S2.deinterleave" => "__builtin_HEXAGON_S2_deinterleave", + "llvm.hexagon.S2.extractu" => "__builtin_HEXAGON_S2_extractu", + "llvm.hexagon.S2.extractu.rp" => "__builtin_HEXAGON_S2_extractu_rp", + "llvm.hexagon.S2.extractup" => "__builtin_HEXAGON_S2_extractup", + "llvm.hexagon.S2.extractup.rp" => "__builtin_HEXAGON_S2_extractup_rp", + "llvm.hexagon.S2.insert" => "__builtin_HEXAGON_S2_insert", + "llvm.hexagon.S2.insert.rp" => "__builtin_HEXAGON_S2_insert_rp", + "llvm.hexagon.S2.insertp" => "__builtin_HEXAGON_S2_insertp", + "llvm.hexagon.S2.insertp.rp" => "__builtin_HEXAGON_S2_insertp_rp", + "llvm.hexagon.S2.interleave" => "__builtin_HEXAGON_S2_interleave", + "llvm.hexagon.S2.lfsp" => "__builtin_HEXAGON_S2_lfsp", + "llvm.hexagon.S2.lsl.r.p" => "__builtin_HEXAGON_S2_lsl_r_p", + "llvm.hexagon.S2.lsl.r.p.acc" => "__builtin_HEXAGON_S2_lsl_r_p_acc", + "llvm.hexagon.S2.lsl.r.p.and" => 
"__builtin_HEXAGON_S2_lsl_r_p_and", + "llvm.hexagon.S2.lsl.r.p.nac" => "__builtin_HEXAGON_S2_lsl_r_p_nac", + "llvm.hexagon.S2.lsl.r.p.or" => "__builtin_HEXAGON_S2_lsl_r_p_or", + "llvm.hexagon.S2.lsl.r.p.xor" => "__builtin_HEXAGON_S2_lsl_r_p_xor", + "llvm.hexagon.S2.lsl.r.r" => "__builtin_HEXAGON_S2_lsl_r_r", + "llvm.hexagon.S2.lsl.r.r.acc" => "__builtin_HEXAGON_S2_lsl_r_r_acc", + "llvm.hexagon.S2.lsl.r.r.and" => "__builtin_HEXAGON_S2_lsl_r_r_and", + "llvm.hexagon.S2.lsl.r.r.nac" => "__builtin_HEXAGON_S2_lsl_r_r_nac", + "llvm.hexagon.S2.lsl.r.r.or" => "__builtin_HEXAGON_S2_lsl_r_r_or", + "llvm.hexagon.S2.lsl.r.vh" => "__builtin_HEXAGON_S2_lsl_r_vh", + "llvm.hexagon.S2.lsl.r.vw" => "__builtin_HEXAGON_S2_lsl_r_vw", + "llvm.hexagon.S2.lsr.i.p" => "__builtin_HEXAGON_S2_lsr_i_p", + "llvm.hexagon.S2.lsr.i.p.acc" => "__builtin_HEXAGON_S2_lsr_i_p_acc", + "llvm.hexagon.S2.lsr.i.p.and" => "__builtin_HEXAGON_S2_lsr_i_p_and", + "llvm.hexagon.S2.lsr.i.p.nac" => "__builtin_HEXAGON_S2_lsr_i_p_nac", + "llvm.hexagon.S2.lsr.i.p.or" => "__builtin_HEXAGON_S2_lsr_i_p_or", + "llvm.hexagon.S2.lsr.i.p.xacc" => "__builtin_HEXAGON_S2_lsr_i_p_xacc", + "llvm.hexagon.S2.lsr.i.r" => "__builtin_HEXAGON_S2_lsr_i_r", + "llvm.hexagon.S2.lsr.i.r.acc" => "__builtin_HEXAGON_S2_lsr_i_r_acc", + "llvm.hexagon.S2.lsr.i.r.and" => "__builtin_HEXAGON_S2_lsr_i_r_and", + "llvm.hexagon.S2.lsr.i.r.nac" => "__builtin_HEXAGON_S2_lsr_i_r_nac", + "llvm.hexagon.S2.lsr.i.r.or" => "__builtin_HEXAGON_S2_lsr_i_r_or", + "llvm.hexagon.S2.lsr.i.r.xacc" => "__builtin_HEXAGON_S2_lsr_i_r_xacc", + "llvm.hexagon.S2.lsr.i.vh" => "__builtin_HEXAGON_S2_lsr_i_vh", + "llvm.hexagon.S2.lsr.i.vw" => "__builtin_HEXAGON_S2_lsr_i_vw", + "llvm.hexagon.S2.lsr.r.p" => "__builtin_HEXAGON_S2_lsr_r_p", + "llvm.hexagon.S2.lsr.r.p.acc" => "__builtin_HEXAGON_S2_lsr_r_p_acc", + "llvm.hexagon.S2.lsr.r.p.and" => "__builtin_HEXAGON_S2_lsr_r_p_and", + "llvm.hexagon.S2.lsr.r.p.nac" => "__builtin_HEXAGON_S2_lsr_r_p_nac", + "llvm.hexagon.S2.lsr.r.p.or" => "__builtin_HEXAGON_S2_lsr_r_p_or", + "llvm.hexagon.S2.lsr.r.p.xor" => "__builtin_HEXAGON_S2_lsr_r_p_xor", + "llvm.hexagon.S2.lsr.r.r" => "__builtin_HEXAGON_S2_lsr_r_r", + "llvm.hexagon.S2.lsr.r.r.acc" => "__builtin_HEXAGON_S2_lsr_r_r_acc", + "llvm.hexagon.S2.lsr.r.r.and" => "__builtin_HEXAGON_S2_lsr_r_r_and", + "llvm.hexagon.S2.lsr.r.r.nac" => "__builtin_HEXAGON_S2_lsr_r_r_nac", + "llvm.hexagon.S2.lsr.r.r.or" => "__builtin_HEXAGON_S2_lsr_r_r_or", + "llvm.hexagon.S2.lsr.r.vh" => "__builtin_HEXAGON_S2_lsr_r_vh", + "llvm.hexagon.S2.lsr.r.vw" => "__builtin_HEXAGON_S2_lsr_r_vw", + "llvm.hexagon.S2.mask" => "__builtin_HEXAGON_S2_mask", + "llvm.hexagon.S2.packhl" => "__builtin_HEXAGON_S2_packhl", + "llvm.hexagon.S2.parityp" => "__builtin_HEXAGON_S2_parityp", + "llvm.hexagon.S2.setbit.i" => "__builtin_HEXAGON_S2_setbit_i", + "llvm.hexagon.S2.setbit.r" => "__builtin_HEXAGON_S2_setbit_r", + "llvm.hexagon.S2.shuffeb" => "__builtin_HEXAGON_S2_shuffeb", + "llvm.hexagon.S2.shuffeh" => "__builtin_HEXAGON_S2_shuffeh", + "llvm.hexagon.S2.shuffob" => "__builtin_HEXAGON_S2_shuffob", + "llvm.hexagon.S2.shuffoh" => "__builtin_HEXAGON_S2_shuffoh", + "llvm.hexagon.S2.storerb.pbr" => "__builtin_brev_stb", + "llvm.hexagon.S2.storerd.pbr" => "__builtin_brev_std", + "llvm.hexagon.S2.storerf.pbr" => "__builtin_brev_sthhi", + "llvm.hexagon.S2.storerh.pbr" => "__builtin_brev_sth", + "llvm.hexagon.S2.storeri.pbr" => "__builtin_brev_stw", + "llvm.hexagon.S2.storew.locked" => "__builtin_HEXAGON_S2_storew_locked", + "llvm.hexagon.S2.svsathb" => 
"__builtin_HEXAGON_S2_svsathb", + "llvm.hexagon.S2.svsathub" => "__builtin_HEXAGON_S2_svsathub", + "llvm.hexagon.S2.tableidxb.goodsyntax" => "__builtin_HEXAGON_S2_tableidxb_goodsyntax", + "llvm.hexagon.S2.tableidxd.goodsyntax" => "__builtin_HEXAGON_S2_tableidxd_goodsyntax", + "llvm.hexagon.S2.tableidxh.goodsyntax" => "__builtin_HEXAGON_S2_tableidxh_goodsyntax", + "llvm.hexagon.S2.tableidxw.goodsyntax" => "__builtin_HEXAGON_S2_tableidxw_goodsyntax", + "llvm.hexagon.S2.togglebit.i" => "__builtin_HEXAGON_S2_togglebit_i", + "llvm.hexagon.S2.togglebit.r" => "__builtin_HEXAGON_S2_togglebit_r", + "llvm.hexagon.S2.tstbit.i" => "__builtin_HEXAGON_S2_tstbit_i", + "llvm.hexagon.S2.tstbit.r" => "__builtin_HEXAGON_S2_tstbit_r", + "llvm.hexagon.S2.valignib" => "__builtin_HEXAGON_S2_valignib", + "llvm.hexagon.S2.valignrb" => "__builtin_HEXAGON_S2_valignrb", + "llvm.hexagon.S2.vcnegh" => "__builtin_HEXAGON_S2_vcnegh", + "llvm.hexagon.S2.vcrotate" => "__builtin_HEXAGON_S2_vcrotate", + "llvm.hexagon.S2.vrcnegh" => "__builtin_HEXAGON_S2_vrcnegh", + "llvm.hexagon.S2.vrndpackwh" => "__builtin_HEXAGON_S2_vrndpackwh", + "llvm.hexagon.S2.vrndpackwhs" => "__builtin_HEXAGON_S2_vrndpackwhs", + "llvm.hexagon.S2.vsathb" => "__builtin_HEXAGON_S2_vsathb", + "llvm.hexagon.S2.vsathb.nopack" => "__builtin_HEXAGON_S2_vsathb_nopack", + "llvm.hexagon.S2.vsathub" => "__builtin_HEXAGON_S2_vsathub", + "llvm.hexagon.S2.vsathub.nopack" => "__builtin_HEXAGON_S2_vsathub_nopack", + "llvm.hexagon.S2.vsatwh" => "__builtin_HEXAGON_S2_vsatwh", + "llvm.hexagon.S2.vsatwh.nopack" => "__builtin_HEXAGON_S2_vsatwh_nopack", + "llvm.hexagon.S2.vsatwuh" => "__builtin_HEXAGON_S2_vsatwuh", + "llvm.hexagon.S2.vsatwuh.nopack" => "__builtin_HEXAGON_S2_vsatwuh_nopack", + "llvm.hexagon.S2.vsplatrb" => "__builtin_HEXAGON_S2_vsplatrb", + "llvm.hexagon.S2.vsplatrh" => "__builtin_HEXAGON_S2_vsplatrh", + "llvm.hexagon.S2.vspliceib" => "__builtin_HEXAGON_S2_vspliceib", + "llvm.hexagon.S2.vsplicerb" => "__builtin_HEXAGON_S2_vsplicerb", + "llvm.hexagon.S2.vsxtbh" => "__builtin_HEXAGON_S2_vsxtbh", + "llvm.hexagon.S2.vsxthw" => "__builtin_HEXAGON_S2_vsxthw", + "llvm.hexagon.S2.vtrunehb" => "__builtin_HEXAGON_S2_vtrunehb", + "llvm.hexagon.S2.vtrunewh" => "__builtin_HEXAGON_S2_vtrunewh", + "llvm.hexagon.S2.vtrunohb" => "__builtin_HEXAGON_S2_vtrunohb", + "llvm.hexagon.S2.vtrunowh" => "__builtin_HEXAGON_S2_vtrunowh", + "llvm.hexagon.S2.vzxtbh" => "__builtin_HEXAGON_S2_vzxtbh", + "llvm.hexagon.S2.vzxthw" => "__builtin_HEXAGON_S2_vzxthw", + "llvm.hexagon.S4.addaddi" => "__builtin_HEXAGON_S4_addaddi", + "llvm.hexagon.S4.addi.asl.ri" => "__builtin_HEXAGON_S4_addi_asl_ri", + "llvm.hexagon.S4.addi.lsr.ri" => "__builtin_HEXAGON_S4_addi_lsr_ri", + "llvm.hexagon.S4.andi.asl.ri" => "__builtin_HEXAGON_S4_andi_asl_ri", + "llvm.hexagon.S4.andi.lsr.ri" => "__builtin_HEXAGON_S4_andi_lsr_ri", + "llvm.hexagon.S4.clbaddi" => "__builtin_HEXAGON_S4_clbaddi", + "llvm.hexagon.S4.clbpaddi" => "__builtin_HEXAGON_S4_clbpaddi", + "llvm.hexagon.S4.clbpnorm" => "__builtin_HEXAGON_S4_clbpnorm", + "llvm.hexagon.S4.extract" => "__builtin_HEXAGON_S4_extract", + "llvm.hexagon.S4.extract.rp" => "__builtin_HEXAGON_S4_extract_rp", + "llvm.hexagon.S4.extractp" => "__builtin_HEXAGON_S4_extractp", + "llvm.hexagon.S4.extractp.rp" => "__builtin_HEXAGON_S4_extractp_rp", + "llvm.hexagon.S4.lsli" => "__builtin_HEXAGON_S4_lsli", + "llvm.hexagon.S4.ntstbit.i" => "__builtin_HEXAGON_S4_ntstbit_i", + "llvm.hexagon.S4.ntstbit.r" => "__builtin_HEXAGON_S4_ntstbit_r", + "llvm.hexagon.S4.or.andi" => 
"__builtin_HEXAGON_S4_or_andi", + "llvm.hexagon.S4.or.andix" => "__builtin_HEXAGON_S4_or_andix", + "llvm.hexagon.S4.or.ori" => "__builtin_HEXAGON_S4_or_ori", + "llvm.hexagon.S4.ori.asl.ri" => "__builtin_HEXAGON_S4_ori_asl_ri", + "llvm.hexagon.S4.ori.lsr.ri" => "__builtin_HEXAGON_S4_ori_lsr_ri", + "llvm.hexagon.S4.parity" => "__builtin_HEXAGON_S4_parity", + "llvm.hexagon.S4.stored.locked" => "__builtin_HEXAGON_S4_stored_locked", + "llvm.hexagon.S4.subaddi" => "__builtin_HEXAGON_S4_subaddi", + "llvm.hexagon.S4.subi.asl.ri" => "__builtin_HEXAGON_S4_subi_asl_ri", + "llvm.hexagon.S4.subi.lsr.ri" => "__builtin_HEXAGON_S4_subi_lsr_ri", + "llvm.hexagon.S4.vrcrotate" => "__builtin_HEXAGON_S4_vrcrotate", + "llvm.hexagon.S4.vrcrotate.acc" => "__builtin_HEXAGON_S4_vrcrotate_acc", + "llvm.hexagon.S4.vxaddsubh" => "__builtin_HEXAGON_S4_vxaddsubh", + "llvm.hexagon.S4.vxaddsubhr" => "__builtin_HEXAGON_S4_vxaddsubhr", + "llvm.hexagon.S4.vxaddsubw" => "__builtin_HEXAGON_S4_vxaddsubw", + "llvm.hexagon.S4.vxsubaddh" => "__builtin_HEXAGON_S4_vxsubaddh", + "llvm.hexagon.S4.vxsubaddhr" => "__builtin_HEXAGON_S4_vxsubaddhr", + "llvm.hexagon.S4.vxsubaddw" => "__builtin_HEXAGON_S4_vxsubaddw", + "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax" => "__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax", + "llvm.hexagon.S5.asrhub.sat" => "__builtin_HEXAGON_S5_asrhub_sat", + "llvm.hexagon.S5.popcountp" => "__builtin_HEXAGON_S5_popcountp", + "llvm.hexagon.S5.vasrhrnd.goodsyntax" => "__builtin_HEXAGON_S5_vasrhrnd_goodsyntax", + "llvm.hexagon.S6.rol.i.p" => "__builtin_HEXAGON_S6_rol_i_p", + "llvm.hexagon.S6.rol.i.p.acc" => "__builtin_HEXAGON_S6_rol_i_p_acc", + "llvm.hexagon.S6.rol.i.p.and" => "__builtin_HEXAGON_S6_rol_i_p_and", + "llvm.hexagon.S6.rol.i.p.nac" => "__builtin_HEXAGON_S6_rol_i_p_nac", + "llvm.hexagon.S6.rol.i.p.or" => "__builtin_HEXAGON_S6_rol_i_p_or", + "llvm.hexagon.S6.rol.i.p.xacc" => "__builtin_HEXAGON_S6_rol_i_p_xacc", + "llvm.hexagon.S6.rol.i.r" => "__builtin_HEXAGON_S6_rol_i_r", + "llvm.hexagon.S6.rol.i.r.acc" => "__builtin_HEXAGON_S6_rol_i_r_acc", + "llvm.hexagon.S6.rol.i.r.and" => "__builtin_HEXAGON_S6_rol_i_r_and", + "llvm.hexagon.S6.rol.i.r.nac" => "__builtin_HEXAGON_S6_rol_i_r_nac", + "llvm.hexagon.S6.rol.i.r.or" => "__builtin_HEXAGON_S6_rol_i_r_or", + "llvm.hexagon.S6.rol.i.r.xacc" => "__builtin_HEXAGON_S6_rol_i_r_xacc", + "llvm.hexagon.S6.vsplatrbp" => "__builtin_HEXAGON_S6_vsplatrbp", + "llvm.hexagon.S6.vtrunehb.ppp" => "__builtin_HEXAGON_S6_vtrunehb_ppp", + "llvm.hexagon.S6.vtrunohb.ppp" => "__builtin_HEXAGON_S6_vtrunohb_ppp", + "llvm.hexagon.SI.to.SXTHI.asrh" => "__builtin_SI_to_SXTHI_asrh", + "llvm.hexagon.V6.extractw" => "__builtin_HEXAGON_V6_extractw", + "llvm.hexagon.V6.extractw.128B" => "__builtin_HEXAGON_V6_extractw_128B", + "llvm.hexagon.V6.hi" => "__builtin_HEXAGON_V6_hi", + "llvm.hexagon.V6.hi.128B" => "__builtin_HEXAGON_V6_hi_128B", + "llvm.hexagon.V6.lo" => "__builtin_HEXAGON_V6_lo", + "llvm.hexagon.V6.lo.128B" => "__builtin_HEXAGON_V6_lo_128B", + "llvm.hexagon.V6.lvsplatb" => "__builtin_HEXAGON_V6_lvsplatb", + "llvm.hexagon.V6.lvsplatb.128B" => "__builtin_HEXAGON_V6_lvsplatb_128B", + "llvm.hexagon.V6.lvsplath" => "__builtin_HEXAGON_V6_lvsplath", + "llvm.hexagon.V6.lvsplath.128B" => "__builtin_HEXAGON_V6_lvsplath_128B", + "llvm.hexagon.V6.lvsplatw" => "__builtin_HEXAGON_V6_lvsplatw", + "llvm.hexagon.V6.lvsplatw.128B" => "__builtin_HEXAGON_V6_lvsplatw_128B", + "llvm.hexagon.V6.pred.and" => "__builtin_HEXAGON_V6_pred_and", + "llvm.hexagon.V6.pred.and.128B" => "__builtin_HEXAGON_V6_pred_and_128B", 
+ "llvm.hexagon.V6.pred.and.n" => "__builtin_HEXAGON_V6_pred_and_n", + "llvm.hexagon.V6.pred.and.n.128B" => "__builtin_HEXAGON_V6_pred_and_n_128B", + "llvm.hexagon.V6.pred.not" => "__builtin_HEXAGON_V6_pred_not", + "llvm.hexagon.V6.pred.not.128B" => "__builtin_HEXAGON_V6_pred_not_128B", + "llvm.hexagon.V6.pred.or" => "__builtin_HEXAGON_V6_pred_or", + "llvm.hexagon.V6.pred.or.128B" => "__builtin_HEXAGON_V6_pred_or_128B", + "llvm.hexagon.V6.pred.or.n" => "__builtin_HEXAGON_V6_pred_or_n", + "llvm.hexagon.V6.pred.or.n.128B" => "__builtin_HEXAGON_V6_pred_or_n_128B", + "llvm.hexagon.V6.pred.scalar2" => "__builtin_HEXAGON_V6_pred_scalar2", + "llvm.hexagon.V6.pred.scalar2.128B" => "__builtin_HEXAGON_V6_pred_scalar2_128B", + "llvm.hexagon.V6.pred.scalar2v2" => "__builtin_HEXAGON_V6_pred_scalar2v2", + "llvm.hexagon.V6.pred.scalar2v2.128B" => "__builtin_HEXAGON_V6_pred_scalar2v2_128B", + "llvm.hexagon.V6.pred.xor" => "__builtin_HEXAGON_V6_pred_xor", + "llvm.hexagon.V6.pred.xor.128B" => "__builtin_HEXAGON_V6_pred_xor_128B", + "llvm.hexagon.V6.shuffeqh" => "__builtin_HEXAGON_V6_shuffeqh", + "llvm.hexagon.V6.shuffeqh.128B" => "__builtin_HEXAGON_V6_shuffeqh_128B", + "llvm.hexagon.V6.shuffeqw" => "__builtin_HEXAGON_V6_shuffeqw", + "llvm.hexagon.V6.shuffeqw.128B" => "__builtin_HEXAGON_V6_shuffeqw_128B", + "llvm.hexagon.V6.v6mpyhubs10" => "__builtin_HEXAGON_V6_v6mpyhubs10", + "llvm.hexagon.V6.v6mpyhubs10.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_128B", + "llvm.hexagon.V6.v6mpyhubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx", + "llvm.hexagon.V6.v6mpyhubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B", + "llvm.hexagon.V6.v6mpyvubs10" => "__builtin_HEXAGON_V6_v6mpyvubs10", + "llvm.hexagon.V6.v6mpyvubs10.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_128B", + "llvm.hexagon.V6.v6mpyvubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx", + "llvm.hexagon.V6.v6mpyvubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B", + "llvm.hexagon.V6.vS32b.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai", + "llvm.hexagon.V6.vS32b.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B", + "llvm.hexagon.V6.vS32b.nt.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai", + "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B", + "llvm.hexagon.V6.vS32b.nt.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai", + "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B", + "llvm.hexagon.V6.vS32b.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_qpred_ai", + "llvm.hexagon.V6.vS32b.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_qpred_ai_128B", + "llvm.hexagon.V6.vabs.hf" => "__builtin_HEXAGON_V6_vabs_hf", + "llvm.hexagon.V6.vabs.hf.128B" => "__builtin_HEXAGON_V6_vabs_hf_128B", + "llvm.hexagon.V6.vabs.sf" => "__builtin_HEXAGON_V6_vabs_sf", + "llvm.hexagon.V6.vabs.sf.128B" => "__builtin_HEXAGON_V6_vabs_sf_128B", + "llvm.hexagon.V6.vabsb" => "__builtin_HEXAGON_V6_vabsb", + "llvm.hexagon.V6.vabsb.128B" => "__builtin_HEXAGON_V6_vabsb_128B", + "llvm.hexagon.V6.vabsb.sat" => "__builtin_HEXAGON_V6_vabsb_sat", + "llvm.hexagon.V6.vabsb.sat.128B" => "__builtin_HEXAGON_V6_vabsb_sat_128B", + "llvm.hexagon.V6.vabsdiffh" => "__builtin_HEXAGON_V6_vabsdiffh", + "llvm.hexagon.V6.vabsdiffh.128B" => "__builtin_HEXAGON_V6_vabsdiffh_128B", + "llvm.hexagon.V6.vabsdiffub" => "__builtin_HEXAGON_V6_vabsdiffub", + "llvm.hexagon.V6.vabsdiffub.128B" => "__builtin_HEXAGON_V6_vabsdiffub_128B", + "llvm.hexagon.V6.vabsdiffuh" => "__builtin_HEXAGON_V6_vabsdiffuh", + 
"llvm.hexagon.V6.vabsdiffuh.128B" => "__builtin_HEXAGON_V6_vabsdiffuh_128B", + "llvm.hexagon.V6.vabsdiffw" => "__builtin_HEXAGON_V6_vabsdiffw", + "llvm.hexagon.V6.vabsdiffw.128B" => "__builtin_HEXAGON_V6_vabsdiffw_128B", + "llvm.hexagon.V6.vabsh" => "__builtin_HEXAGON_V6_vabsh", + "llvm.hexagon.V6.vabsh.128B" => "__builtin_HEXAGON_V6_vabsh_128B", + "llvm.hexagon.V6.vabsh.sat" => "__builtin_HEXAGON_V6_vabsh_sat", + "llvm.hexagon.V6.vabsh.sat.128B" => "__builtin_HEXAGON_V6_vabsh_sat_128B", + "llvm.hexagon.V6.vabsw" => "__builtin_HEXAGON_V6_vabsw", + "llvm.hexagon.V6.vabsw.128B" => "__builtin_HEXAGON_V6_vabsw_128B", + "llvm.hexagon.V6.vabsw.sat" => "__builtin_HEXAGON_V6_vabsw_sat", + "llvm.hexagon.V6.vabsw.sat.128B" => "__builtin_HEXAGON_V6_vabsw_sat_128B", + "llvm.hexagon.V6.vadd.hf" => "__builtin_HEXAGON_V6_vadd_hf", + "llvm.hexagon.V6.vadd.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_128B", + "llvm.hexagon.V6.vadd.hf.hf" => "__builtin_HEXAGON_V6_vadd_hf_hf", + "llvm.hexagon.V6.vadd.hf.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_hf_128B", + "llvm.hexagon.V6.vadd.qf16" => "__builtin_HEXAGON_V6_vadd_qf16", + "llvm.hexagon.V6.vadd.qf16.128B" => "__builtin_HEXAGON_V6_vadd_qf16_128B", + "llvm.hexagon.V6.vadd.qf16.mix" => "__builtin_HEXAGON_V6_vadd_qf16_mix", + "llvm.hexagon.V6.vadd.qf16.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf16_mix_128B", + "llvm.hexagon.V6.vadd.qf32" => "__builtin_HEXAGON_V6_vadd_qf32", + "llvm.hexagon.V6.vadd.qf32.128B" => "__builtin_HEXAGON_V6_vadd_qf32_128B", + "llvm.hexagon.V6.vadd.qf32.mix" => "__builtin_HEXAGON_V6_vadd_qf32_mix", + "llvm.hexagon.V6.vadd.qf32.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf32_mix_128B", + "llvm.hexagon.V6.vadd.sf" => "__builtin_HEXAGON_V6_vadd_sf", + "llvm.hexagon.V6.vadd.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_128B", + "llvm.hexagon.V6.vadd.sf.bf" => "__builtin_HEXAGON_V6_vadd_sf_bf", + "llvm.hexagon.V6.vadd.sf.bf.128B" => "__builtin_HEXAGON_V6_vadd_sf_bf_128B", + "llvm.hexagon.V6.vadd.sf.hf" => "__builtin_HEXAGON_V6_vadd_sf_hf", + "llvm.hexagon.V6.vadd.sf.hf.128B" => "__builtin_HEXAGON_V6_vadd_sf_hf_128B", + "llvm.hexagon.V6.vadd.sf.sf" => "__builtin_HEXAGON_V6_vadd_sf_sf", + "llvm.hexagon.V6.vadd.sf.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_sf_128B", + "llvm.hexagon.V6.vaddb" => "__builtin_HEXAGON_V6_vaddb", + "llvm.hexagon.V6.vaddb.128B" => "__builtin_HEXAGON_V6_vaddb_128B", + "llvm.hexagon.V6.vaddb.dv" => "__builtin_HEXAGON_V6_vaddb_dv", + "llvm.hexagon.V6.vaddb.dv.128B" => "__builtin_HEXAGON_V6_vaddb_dv_128B", + "llvm.hexagon.V6.vaddbnq" => "__builtin_HEXAGON_V6_vaddbnq", + "llvm.hexagon.V6.vaddbnq.128B" => "__builtin_HEXAGON_V6_vaddbnq_128B", + "llvm.hexagon.V6.vaddbq" => "__builtin_HEXAGON_V6_vaddbq", + "llvm.hexagon.V6.vaddbq.128B" => "__builtin_HEXAGON_V6_vaddbq_128B", + "llvm.hexagon.V6.vaddbsat" => "__builtin_HEXAGON_V6_vaddbsat", + "llvm.hexagon.V6.vaddbsat.128B" => "__builtin_HEXAGON_V6_vaddbsat_128B", + "llvm.hexagon.V6.vaddbsat.dv" => "__builtin_HEXAGON_V6_vaddbsat_dv", + "llvm.hexagon.V6.vaddbsat.dv.128B" => "__builtin_HEXAGON_V6_vaddbsat_dv_128B", + "llvm.hexagon.V6.vaddcarrysat" => "__builtin_HEXAGON_V6_vaddcarrysat", + "llvm.hexagon.V6.vaddcarrysat.128B" => "__builtin_HEXAGON_V6_vaddcarrysat_128B", + "llvm.hexagon.V6.vaddclbh" => "__builtin_HEXAGON_V6_vaddclbh", + "llvm.hexagon.V6.vaddclbh.128B" => "__builtin_HEXAGON_V6_vaddclbh_128B", + "llvm.hexagon.V6.vaddclbw" => "__builtin_HEXAGON_V6_vaddclbw", + "llvm.hexagon.V6.vaddclbw.128B" => "__builtin_HEXAGON_V6_vaddclbw_128B", + "llvm.hexagon.V6.vaddh" => 
"__builtin_HEXAGON_V6_vaddh", + "llvm.hexagon.V6.vaddh.128B" => "__builtin_HEXAGON_V6_vaddh_128B", + "llvm.hexagon.V6.vaddh.dv" => "__builtin_HEXAGON_V6_vaddh_dv", + "llvm.hexagon.V6.vaddh.dv.128B" => "__builtin_HEXAGON_V6_vaddh_dv_128B", + "llvm.hexagon.V6.vaddhnq" => "__builtin_HEXAGON_V6_vaddhnq", + "llvm.hexagon.V6.vaddhnq.128B" => "__builtin_HEXAGON_V6_vaddhnq_128B", + "llvm.hexagon.V6.vaddhq" => "__builtin_HEXAGON_V6_vaddhq", + "llvm.hexagon.V6.vaddhq.128B" => "__builtin_HEXAGON_V6_vaddhq_128B", + "llvm.hexagon.V6.vaddhsat" => "__builtin_HEXAGON_V6_vaddhsat", + "llvm.hexagon.V6.vaddhsat.128B" => "__builtin_HEXAGON_V6_vaddhsat_128B", + "llvm.hexagon.V6.vaddhsat.dv" => "__builtin_HEXAGON_V6_vaddhsat_dv", + "llvm.hexagon.V6.vaddhsat.dv.128B" => "__builtin_HEXAGON_V6_vaddhsat_dv_128B", + "llvm.hexagon.V6.vaddhw" => "__builtin_HEXAGON_V6_vaddhw", + "llvm.hexagon.V6.vaddhw.128B" => "__builtin_HEXAGON_V6_vaddhw_128B", + "llvm.hexagon.V6.vaddhw.acc" => "__builtin_HEXAGON_V6_vaddhw_acc", + "llvm.hexagon.V6.vaddhw.acc.128B" => "__builtin_HEXAGON_V6_vaddhw_acc_128B", + "llvm.hexagon.V6.vaddubh" => "__builtin_HEXAGON_V6_vaddubh", + "llvm.hexagon.V6.vaddubh.128B" => "__builtin_HEXAGON_V6_vaddubh_128B", + "llvm.hexagon.V6.vaddubh.acc" => "__builtin_HEXAGON_V6_vaddubh_acc", + "llvm.hexagon.V6.vaddubh.acc.128B" => "__builtin_HEXAGON_V6_vaddubh_acc_128B", + "llvm.hexagon.V6.vaddubsat" => "__builtin_HEXAGON_V6_vaddubsat", + "llvm.hexagon.V6.vaddubsat.128B" => "__builtin_HEXAGON_V6_vaddubsat_128B", + "llvm.hexagon.V6.vaddubsat.dv" => "__builtin_HEXAGON_V6_vaddubsat_dv", + "llvm.hexagon.V6.vaddubsat.dv.128B" => "__builtin_HEXAGON_V6_vaddubsat_dv_128B", + "llvm.hexagon.V6.vaddububb.sat" => "__builtin_HEXAGON_V6_vaddububb_sat", + "llvm.hexagon.V6.vaddububb.sat.128B" => "__builtin_HEXAGON_V6_vaddububb_sat_128B", + "llvm.hexagon.V6.vadduhsat" => "__builtin_HEXAGON_V6_vadduhsat", + "llvm.hexagon.V6.vadduhsat.128B" => "__builtin_HEXAGON_V6_vadduhsat_128B", + "llvm.hexagon.V6.vadduhsat.dv" => "__builtin_HEXAGON_V6_vadduhsat_dv", + "llvm.hexagon.V6.vadduhsat.dv.128B" => "__builtin_HEXAGON_V6_vadduhsat_dv_128B", + "llvm.hexagon.V6.vadduhw" => "__builtin_HEXAGON_V6_vadduhw", + "llvm.hexagon.V6.vadduhw.128B" => "__builtin_HEXAGON_V6_vadduhw_128B", + "llvm.hexagon.V6.vadduhw.acc" => "__builtin_HEXAGON_V6_vadduhw_acc", + "llvm.hexagon.V6.vadduhw.acc.128B" => "__builtin_HEXAGON_V6_vadduhw_acc_128B", + "llvm.hexagon.V6.vadduwsat" => "__builtin_HEXAGON_V6_vadduwsat", + "llvm.hexagon.V6.vadduwsat.128B" => "__builtin_HEXAGON_V6_vadduwsat_128B", + "llvm.hexagon.V6.vadduwsat.dv" => "__builtin_HEXAGON_V6_vadduwsat_dv", + "llvm.hexagon.V6.vadduwsat.dv.128B" => "__builtin_HEXAGON_V6_vadduwsat_dv_128B", + "llvm.hexagon.V6.vaddw" => "__builtin_HEXAGON_V6_vaddw", + "llvm.hexagon.V6.vaddw.128B" => "__builtin_HEXAGON_V6_vaddw_128B", + "llvm.hexagon.V6.vaddw.dv" => "__builtin_HEXAGON_V6_vaddw_dv", + "llvm.hexagon.V6.vaddw.dv.128B" => "__builtin_HEXAGON_V6_vaddw_dv_128B", + "llvm.hexagon.V6.vaddwnq" => "__builtin_HEXAGON_V6_vaddwnq", + "llvm.hexagon.V6.vaddwnq.128B" => "__builtin_HEXAGON_V6_vaddwnq_128B", + "llvm.hexagon.V6.vaddwq" => "__builtin_HEXAGON_V6_vaddwq", + "llvm.hexagon.V6.vaddwq.128B" => "__builtin_HEXAGON_V6_vaddwq_128B", + "llvm.hexagon.V6.vaddwsat" => "__builtin_HEXAGON_V6_vaddwsat", + "llvm.hexagon.V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B", + "llvm.hexagon.V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv", + "llvm.hexagon.V6.vaddwsat.dv.128B" => "__builtin_HEXAGON_V6_vaddwsat_dv_128B", + 
"llvm.hexagon.V6.valignb" => "__builtin_HEXAGON_V6_valignb", + "llvm.hexagon.V6.valignb.128B" => "__builtin_HEXAGON_V6_valignb_128B", + "llvm.hexagon.V6.valignbi" => "__builtin_HEXAGON_V6_valignbi", + "llvm.hexagon.V6.valignbi.128B" => "__builtin_HEXAGON_V6_valignbi_128B", + "llvm.hexagon.V6.vand" => "__builtin_HEXAGON_V6_vand", + "llvm.hexagon.V6.vand.128B" => "__builtin_HEXAGON_V6_vand_128B", + "llvm.hexagon.V6.vandnqrt" => "__builtin_HEXAGON_V6_vandnqrt", + "llvm.hexagon.V6.vandnqrt.128B" => "__builtin_HEXAGON_V6_vandnqrt_128B", + "llvm.hexagon.V6.vandnqrt.acc" => "__builtin_HEXAGON_V6_vandnqrt_acc", + "llvm.hexagon.V6.vandnqrt.acc.128B" => "__builtin_HEXAGON_V6_vandnqrt_acc_128B", + "llvm.hexagon.V6.vandqrt" => "__builtin_HEXAGON_V6_vandqrt", + "llvm.hexagon.V6.vandqrt.128B" => "__builtin_HEXAGON_V6_vandqrt_128B", + "llvm.hexagon.V6.vandqrt.acc" => "__builtin_HEXAGON_V6_vandqrt_acc", + "llvm.hexagon.V6.vandqrt.acc.128B" => "__builtin_HEXAGON_V6_vandqrt_acc_128B", + "llvm.hexagon.V6.vandvnqv" => "__builtin_HEXAGON_V6_vandvnqv", + "llvm.hexagon.V6.vandvnqv.128B" => "__builtin_HEXAGON_V6_vandvnqv_128B", + "llvm.hexagon.V6.vandvqv" => "__builtin_HEXAGON_V6_vandvqv", + "llvm.hexagon.V6.vandvqv.128B" => "__builtin_HEXAGON_V6_vandvqv_128B", + "llvm.hexagon.V6.vandvrt" => "__builtin_HEXAGON_V6_vandvrt", + "llvm.hexagon.V6.vandvrt.128B" => "__builtin_HEXAGON_V6_vandvrt_128B", + "llvm.hexagon.V6.vandvrt.acc" => "__builtin_HEXAGON_V6_vandvrt_acc", + "llvm.hexagon.V6.vandvrt.acc.128B" => "__builtin_HEXAGON_V6_vandvrt_acc_128B", + "llvm.hexagon.V6.vaslh" => "__builtin_HEXAGON_V6_vaslh", + "llvm.hexagon.V6.vaslh.128B" => "__builtin_HEXAGON_V6_vaslh_128B", + "llvm.hexagon.V6.vaslh.acc" => "__builtin_HEXAGON_V6_vaslh_acc", + "llvm.hexagon.V6.vaslh.acc.128B" => "__builtin_HEXAGON_V6_vaslh_acc_128B", + "llvm.hexagon.V6.vaslhv" => "__builtin_HEXAGON_V6_vaslhv", + "llvm.hexagon.V6.vaslhv.128B" => "__builtin_HEXAGON_V6_vaslhv_128B", + "llvm.hexagon.V6.vaslw" => "__builtin_HEXAGON_V6_vaslw", + "llvm.hexagon.V6.vaslw.128B" => "__builtin_HEXAGON_V6_vaslw_128B", + "llvm.hexagon.V6.vaslw.acc" => "__builtin_HEXAGON_V6_vaslw_acc", + "llvm.hexagon.V6.vaslw.acc.128B" => "__builtin_HEXAGON_V6_vaslw_acc_128B", + "llvm.hexagon.V6.vaslwv" => "__builtin_HEXAGON_V6_vaslwv", + "llvm.hexagon.V6.vaslwv.128B" => "__builtin_HEXAGON_V6_vaslwv_128B", + "llvm.hexagon.V6.vasr.into" => "__builtin_HEXAGON_V6_vasr_into", + "llvm.hexagon.V6.vasr.into.128B" => "__builtin_HEXAGON_V6_vasr_into_128B", + "llvm.hexagon.V6.vasrh" => "__builtin_HEXAGON_V6_vasrh", + "llvm.hexagon.V6.vasrh.128B" => "__builtin_HEXAGON_V6_vasrh_128B", + "llvm.hexagon.V6.vasrh.acc" => "__builtin_HEXAGON_V6_vasrh_acc", + "llvm.hexagon.V6.vasrh.acc.128B" => "__builtin_HEXAGON_V6_vasrh_acc_128B", + "llvm.hexagon.V6.vasrhbrndsat" => "__builtin_HEXAGON_V6_vasrhbrndsat", + "llvm.hexagon.V6.vasrhbrndsat.128B" => "__builtin_HEXAGON_V6_vasrhbrndsat_128B", + "llvm.hexagon.V6.vasrhbsat" => "__builtin_HEXAGON_V6_vasrhbsat", + "llvm.hexagon.V6.vasrhbsat.128B" => "__builtin_HEXAGON_V6_vasrhbsat_128B", + "llvm.hexagon.V6.vasrhubrndsat" => "__builtin_HEXAGON_V6_vasrhubrndsat", + "llvm.hexagon.V6.vasrhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrhubrndsat_128B", + "llvm.hexagon.V6.vasrhubsat" => "__builtin_HEXAGON_V6_vasrhubsat", + "llvm.hexagon.V6.vasrhubsat.128B" => "__builtin_HEXAGON_V6_vasrhubsat_128B", + "llvm.hexagon.V6.vasrhv" => "__builtin_HEXAGON_V6_vasrhv", + "llvm.hexagon.V6.vasrhv.128B" => "__builtin_HEXAGON_V6_vasrhv_128B", + "llvm.hexagon.V6.vasruhubrndsat" => 
"__builtin_HEXAGON_V6_vasruhubrndsat", + "llvm.hexagon.V6.vasruhubrndsat.128B" => "__builtin_HEXAGON_V6_vasruhubrndsat_128B", + "llvm.hexagon.V6.vasruhubsat" => "__builtin_HEXAGON_V6_vasruhubsat", + "llvm.hexagon.V6.vasruhubsat.128B" => "__builtin_HEXAGON_V6_vasruhubsat_128B", + "llvm.hexagon.V6.vasruwuhrndsat" => "__builtin_HEXAGON_V6_vasruwuhrndsat", + "llvm.hexagon.V6.vasruwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasruwuhrndsat_128B", + "llvm.hexagon.V6.vasruwuhsat" => "__builtin_HEXAGON_V6_vasruwuhsat", + "llvm.hexagon.V6.vasruwuhsat.128B" => "__builtin_HEXAGON_V6_vasruwuhsat_128B", + "llvm.hexagon.V6.vasrvuhubrndsat" => "__builtin_HEXAGON_V6_vasrvuhubrndsat", + "llvm.hexagon.V6.vasrvuhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubrndsat_128B", + "llvm.hexagon.V6.vasrvuhubsat" => "__builtin_HEXAGON_V6_vasrvuhubsat", + "llvm.hexagon.V6.vasrvuhubsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubsat_128B", + "llvm.hexagon.V6.vasrvwuhrndsat" => "__builtin_HEXAGON_V6_vasrvwuhrndsat", + "llvm.hexagon.V6.vasrvwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhrndsat_128B", + "llvm.hexagon.V6.vasrvwuhsat" => "__builtin_HEXAGON_V6_vasrvwuhsat", + "llvm.hexagon.V6.vasrvwuhsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhsat_128B", + "llvm.hexagon.V6.vasrw" => "__builtin_HEXAGON_V6_vasrw", + "llvm.hexagon.V6.vasrw.128B" => "__builtin_HEXAGON_V6_vasrw_128B", + "llvm.hexagon.V6.vasrw.acc" => "__builtin_HEXAGON_V6_vasrw_acc", + "llvm.hexagon.V6.vasrw.acc.128B" => "__builtin_HEXAGON_V6_vasrw_acc_128B", + "llvm.hexagon.V6.vasrwh" => "__builtin_HEXAGON_V6_vasrwh", + "llvm.hexagon.V6.vasrwh.128B" => "__builtin_HEXAGON_V6_vasrwh_128B", + "llvm.hexagon.V6.vasrwhrndsat" => "__builtin_HEXAGON_V6_vasrwhrndsat", + "llvm.hexagon.V6.vasrwhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwhrndsat_128B", + "llvm.hexagon.V6.vasrwhsat" => "__builtin_HEXAGON_V6_vasrwhsat", + "llvm.hexagon.V6.vasrwhsat.128B" => "__builtin_HEXAGON_V6_vasrwhsat_128B", + "llvm.hexagon.V6.vasrwuhrndsat" => "__builtin_HEXAGON_V6_vasrwuhrndsat", + "llvm.hexagon.V6.vasrwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwuhrndsat_128B", + "llvm.hexagon.V6.vasrwuhsat" => "__builtin_HEXAGON_V6_vasrwuhsat", + "llvm.hexagon.V6.vasrwuhsat.128B" => "__builtin_HEXAGON_V6_vasrwuhsat_128B", + "llvm.hexagon.V6.vasrwv" => "__builtin_HEXAGON_V6_vasrwv", + "llvm.hexagon.V6.vasrwv.128B" => "__builtin_HEXAGON_V6_vasrwv_128B", + "llvm.hexagon.V6.vassign" => "__builtin_HEXAGON_V6_vassign", + "llvm.hexagon.V6.vassign.128B" => "__builtin_HEXAGON_V6_vassign_128B", + "llvm.hexagon.V6.vassign.fp" => "__builtin_HEXAGON_V6_vassign_fp", + "llvm.hexagon.V6.vassign.fp.128B" => "__builtin_HEXAGON_V6_vassign_fp_128B", + "llvm.hexagon.V6.vassignp" => "__builtin_HEXAGON_V6_vassignp", + "llvm.hexagon.V6.vassignp.128B" => "__builtin_HEXAGON_V6_vassignp_128B", + "llvm.hexagon.V6.vavgb" => "__builtin_HEXAGON_V6_vavgb", + "llvm.hexagon.V6.vavgb.128B" => "__builtin_HEXAGON_V6_vavgb_128B", + "llvm.hexagon.V6.vavgbrnd" => "__builtin_HEXAGON_V6_vavgbrnd", + "llvm.hexagon.V6.vavgbrnd.128B" => "__builtin_HEXAGON_V6_vavgbrnd_128B", + "llvm.hexagon.V6.vavgh" => "__builtin_HEXAGON_V6_vavgh", + "llvm.hexagon.V6.vavgh.128B" => "__builtin_HEXAGON_V6_vavgh_128B", + "llvm.hexagon.V6.vavghrnd" => "__builtin_HEXAGON_V6_vavghrnd", + "llvm.hexagon.V6.vavghrnd.128B" => "__builtin_HEXAGON_V6_vavghrnd_128B", + "llvm.hexagon.V6.vavgub" => "__builtin_HEXAGON_V6_vavgub", + "llvm.hexagon.V6.vavgub.128B" => "__builtin_HEXAGON_V6_vavgub_128B", + "llvm.hexagon.V6.vavgubrnd" => "__builtin_HEXAGON_V6_vavgubrnd", + 
"llvm.hexagon.V6.vavgubrnd.128B" => "__builtin_HEXAGON_V6_vavgubrnd_128B", + "llvm.hexagon.V6.vavguh" => "__builtin_HEXAGON_V6_vavguh", + "llvm.hexagon.V6.vavguh.128B" => "__builtin_HEXAGON_V6_vavguh_128B", + "llvm.hexagon.V6.vavguhrnd" => "__builtin_HEXAGON_V6_vavguhrnd", + "llvm.hexagon.V6.vavguhrnd.128B" => "__builtin_HEXAGON_V6_vavguhrnd_128B", + "llvm.hexagon.V6.vavguw" => "__builtin_HEXAGON_V6_vavguw", + "llvm.hexagon.V6.vavguw.128B" => "__builtin_HEXAGON_V6_vavguw_128B", + "llvm.hexagon.V6.vavguwrnd" => "__builtin_HEXAGON_V6_vavguwrnd", + "llvm.hexagon.V6.vavguwrnd.128B" => "__builtin_HEXAGON_V6_vavguwrnd_128B", + "llvm.hexagon.V6.vavgw" => "__builtin_HEXAGON_V6_vavgw", + "llvm.hexagon.V6.vavgw.128B" => "__builtin_HEXAGON_V6_vavgw_128B", + "llvm.hexagon.V6.vavgwrnd" => "__builtin_HEXAGON_V6_vavgwrnd", + "llvm.hexagon.V6.vavgwrnd.128B" => "__builtin_HEXAGON_V6_vavgwrnd_128B", + "llvm.hexagon.V6.vcl0h" => "__builtin_HEXAGON_V6_vcl0h", + "llvm.hexagon.V6.vcl0h.128B" => "__builtin_HEXAGON_V6_vcl0h_128B", + "llvm.hexagon.V6.vcl0w" => "__builtin_HEXAGON_V6_vcl0w", + "llvm.hexagon.V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B", + "llvm.hexagon.V6.vcombine" => "__builtin_HEXAGON_V6_vcombine", + "llvm.hexagon.V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B", + "llvm.hexagon.V6.vconv.h.hf" => "__builtin_HEXAGON_V6_vconv_h_hf", + "llvm.hexagon.V6.vconv.h.hf.128B" => "__builtin_HEXAGON_V6_vconv_h_hf_128B", + "llvm.hexagon.V6.vconv.hf.h" => "__builtin_HEXAGON_V6_vconv_hf_h", + "llvm.hexagon.V6.vconv.hf.h.128B" => "__builtin_HEXAGON_V6_vconv_hf_h_128B", + "llvm.hexagon.V6.vconv.hf.qf16" => "__builtin_HEXAGON_V6_vconv_hf_qf16", + "llvm.hexagon.V6.vconv.hf.qf16.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf16_128B", + "llvm.hexagon.V6.vconv.hf.qf32" => "__builtin_HEXAGON_V6_vconv_hf_qf32", + "llvm.hexagon.V6.vconv.hf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf32_128B", + "llvm.hexagon.V6.vconv.sf.qf32" => "__builtin_HEXAGON_V6_vconv_sf_qf32", + "llvm.hexagon.V6.vconv.sf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_sf_qf32_128B", + "llvm.hexagon.V6.vconv.sf.w" => "__builtin_HEXAGON_V6_vconv_sf_w", + "llvm.hexagon.V6.vconv.sf.w.128B" => "__builtin_HEXAGON_V6_vconv_sf_w_128B", + "llvm.hexagon.V6.vconv.w.sf" => "__builtin_HEXAGON_V6_vconv_w_sf", + "llvm.hexagon.V6.vconv.w.sf.128B" => "__builtin_HEXAGON_V6_vconv_w_sf_128B", + "llvm.hexagon.V6.vcvt.b.hf" => "__builtin_HEXAGON_V6_vcvt_b_hf", + "llvm.hexagon.V6.vcvt.b.hf.128B" => "__builtin_HEXAGON_V6_vcvt_b_hf_128B", + "llvm.hexagon.V6.vcvt.bf.sf" => "__builtin_HEXAGON_V6_vcvt_bf_sf", + "llvm.hexagon.V6.vcvt.bf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_bf_sf_128B", + "llvm.hexagon.V6.vcvt.h.hf" => "__builtin_HEXAGON_V6_vcvt_h_hf", + "llvm.hexagon.V6.vcvt.h.hf.128B" => "__builtin_HEXAGON_V6_vcvt_h_hf_128B", + "llvm.hexagon.V6.vcvt.hf.b" => "__builtin_HEXAGON_V6_vcvt_hf_b", + "llvm.hexagon.V6.vcvt.hf.b.128B" => "__builtin_HEXAGON_V6_vcvt_hf_b_128B", + "llvm.hexagon.V6.vcvt.hf.h" => "__builtin_HEXAGON_V6_vcvt_hf_h", + "llvm.hexagon.V6.vcvt.hf.h.128B" => "__builtin_HEXAGON_V6_vcvt_hf_h_128B", + "llvm.hexagon.V6.vcvt.hf.sf" => "__builtin_HEXAGON_V6_vcvt_hf_sf", + "llvm.hexagon.V6.vcvt.hf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_hf_sf_128B", + "llvm.hexagon.V6.vcvt.hf.ub" => "__builtin_HEXAGON_V6_vcvt_hf_ub", + "llvm.hexagon.V6.vcvt.hf.ub.128B" => "__builtin_HEXAGON_V6_vcvt_hf_ub_128B", + "llvm.hexagon.V6.vcvt.hf.uh" => "__builtin_HEXAGON_V6_vcvt_hf_uh", + "llvm.hexagon.V6.vcvt.hf.uh.128B" => "__builtin_HEXAGON_V6_vcvt_hf_uh_128B", + 
"llvm.hexagon.V6.vcvt.sf.hf" => "__builtin_HEXAGON_V6_vcvt_sf_hf", + "llvm.hexagon.V6.vcvt.sf.hf.128B" => "__builtin_HEXAGON_V6_vcvt_sf_hf_128B", + "llvm.hexagon.V6.vcvt.ub.hf" => "__builtin_HEXAGON_V6_vcvt_ub_hf", + "llvm.hexagon.V6.vcvt.ub.hf.128B" => "__builtin_HEXAGON_V6_vcvt_ub_hf_128B", + "llvm.hexagon.V6.vcvt.uh.hf" => "__builtin_HEXAGON_V6_vcvt_uh_hf", + "llvm.hexagon.V6.vcvt.uh.hf.128B" => "__builtin_HEXAGON_V6_vcvt_uh_hf_128B", + "llvm.hexagon.V6.vd0" => "__builtin_HEXAGON_V6_vd0", + "llvm.hexagon.V6.vd0.128B" => "__builtin_HEXAGON_V6_vd0_128B", + "llvm.hexagon.V6.vdd0" => "__builtin_HEXAGON_V6_vdd0", + "llvm.hexagon.V6.vdd0.128B" => "__builtin_HEXAGON_V6_vdd0_128B", + "llvm.hexagon.V6.vdealb" => "__builtin_HEXAGON_V6_vdealb", + "llvm.hexagon.V6.vdealb.128B" => "__builtin_HEXAGON_V6_vdealb_128B", + "llvm.hexagon.V6.vdealb4w" => "__builtin_HEXAGON_V6_vdealb4w", + "llvm.hexagon.V6.vdealb4w.128B" => "__builtin_HEXAGON_V6_vdealb4w_128B", + "llvm.hexagon.V6.vdealh" => "__builtin_HEXAGON_V6_vdealh", + "llvm.hexagon.V6.vdealh.128B" => "__builtin_HEXAGON_V6_vdealh_128B", + "llvm.hexagon.V6.vdealvdd" => "__builtin_HEXAGON_V6_vdealvdd", + "llvm.hexagon.V6.vdealvdd.128B" => "__builtin_HEXAGON_V6_vdealvdd_128B", + "llvm.hexagon.V6.vdelta" => "__builtin_HEXAGON_V6_vdelta", + "llvm.hexagon.V6.vdelta.128B" => "__builtin_HEXAGON_V6_vdelta_128B", + "llvm.hexagon.V6.vdmpy.sf.hf" => "__builtin_HEXAGON_V6_vdmpy_sf_hf", + "llvm.hexagon.V6.vdmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_128B", + "llvm.hexagon.V6.vdmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc", + "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc_128B", + "llvm.hexagon.V6.vdmpybus" => "__builtin_HEXAGON_V6_vdmpybus", + "llvm.hexagon.V6.vdmpybus.128B" => "__builtin_HEXAGON_V6_vdmpybus_128B", + "llvm.hexagon.V6.vdmpybus.acc" => "__builtin_HEXAGON_V6_vdmpybus_acc", + "llvm.hexagon.V6.vdmpybus.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_acc_128B", + "llvm.hexagon.V6.vdmpybus.dv" => "__builtin_HEXAGON_V6_vdmpybus_dv", + "llvm.hexagon.V6.vdmpybus.dv.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_128B", + "llvm.hexagon.V6.vdmpybus.dv.acc" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc", + "llvm.hexagon.V6.vdmpybus.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B", + "llvm.hexagon.V6.vdmpyhb" => "__builtin_HEXAGON_V6_vdmpyhb", + "llvm.hexagon.V6.vdmpyhb.128B" => "__builtin_HEXAGON_V6_vdmpyhb_128B", + "llvm.hexagon.V6.vdmpyhb.acc" => "__builtin_HEXAGON_V6_vdmpyhb_acc", + "llvm.hexagon.V6.vdmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_acc_128B", + "llvm.hexagon.V6.vdmpyhb.dv" => "__builtin_HEXAGON_V6_vdmpyhb_dv", + "llvm.hexagon.V6.vdmpyhb.dv.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_128B", + "llvm.hexagon.V6.vdmpyhb.dv.acc" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc", + "llvm.hexagon.V6.vdmpyhb.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B", + "llvm.hexagon.V6.vdmpyhisat" => "__builtin_HEXAGON_V6_vdmpyhisat", + "llvm.hexagon.V6.vdmpyhisat.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_128B", + "llvm.hexagon.V6.vdmpyhisat.acc" => "__builtin_HEXAGON_V6_vdmpyhisat_acc", + "llvm.hexagon.V6.vdmpyhisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_acc_128B", + "llvm.hexagon.V6.vdmpyhsat" => "__builtin_HEXAGON_V6_vdmpyhsat", + "llvm.hexagon.V6.vdmpyhsat.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_128B", + "llvm.hexagon.V6.vdmpyhsat.acc" => "__builtin_HEXAGON_V6_vdmpyhsat_acc", + "llvm.hexagon.V6.vdmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_acc_128B", + "llvm.hexagon.V6.vdmpyhsuisat" => 
"__builtin_HEXAGON_V6_vdmpyhsuisat", + "llvm.hexagon.V6.vdmpyhsuisat.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_128B", + "llvm.hexagon.V6.vdmpyhsuisat.acc" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc", + "llvm.hexagon.V6.vdmpyhsuisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B", + "llvm.hexagon.V6.vdmpyhsusat" => "__builtin_HEXAGON_V6_vdmpyhsusat", + "llvm.hexagon.V6.vdmpyhsusat.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_128B", + "llvm.hexagon.V6.vdmpyhsusat.acc" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc", + "llvm.hexagon.V6.vdmpyhsusat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B", + "llvm.hexagon.V6.vdmpyhvsat" => "__builtin_HEXAGON_V6_vdmpyhvsat", + "llvm.hexagon.V6.vdmpyhvsat.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_128B", + "llvm.hexagon.V6.vdmpyhvsat.acc" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc", + "llvm.hexagon.V6.vdmpyhvsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B", + "llvm.hexagon.V6.vdsaduh" => "__builtin_HEXAGON_V6_vdsaduh", + "llvm.hexagon.V6.vdsaduh.128B" => "__builtin_HEXAGON_V6_vdsaduh_128B", + "llvm.hexagon.V6.vdsaduh.acc" => "__builtin_HEXAGON_V6_vdsaduh_acc", + "llvm.hexagon.V6.vdsaduh.acc.128B" => "__builtin_HEXAGON_V6_vdsaduh_acc_128B", + "llvm.hexagon.V6.veqb" => "__builtin_HEXAGON_V6_veqb", + "llvm.hexagon.V6.veqb.128B" => "__builtin_HEXAGON_V6_veqb_128B", + "llvm.hexagon.V6.veqb.and" => "__builtin_HEXAGON_V6_veqb_and", + "llvm.hexagon.V6.veqb.and.128B" => "__builtin_HEXAGON_V6_veqb_and_128B", + "llvm.hexagon.V6.veqb.or" => "__builtin_HEXAGON_V6_veqb_or", + "llvm.hexagon.V6.veqb.or.128B" => "__builtin_HEXAGON_V6_veqb_or_128B", + "llvm.hexagon.V6.veqb.xor" => "__builtin_HEXAGON_V6_veqb_xor", + "llvm.hexagon.V6.veqb.xor.128B" => "__builtin_HEXAGON_V6_veqb_xor_128B", + "llvm.hexagon.V6.veqh" => "__builtin_HEXAGON_V6_veqh", + "llvm.hexagon.V6.veqh.128B" => "__builtin_HEXAGON_V6_veqh_128B", + "llvm.hexagon.V6.veqh.and" => "__builtin_HEXAGON_V6_veqh_and", + "llvm.hexagon.V6.veqh.and.128B" => "__builtin_HEXAGON_V6_veqh_and_128B", + "llvm.hexagon.V6.veqh.or" => "__builtin_HEXAGON_V6_veqh_or", + "llvm.hexagon.V6.veqh.or.128B" => "__builtin_HEXAGON_V6_veqh_or_128B", + "llvm.hexagon.V6.veqh.xor" => "__builtin_HEXAGON_V6_veqh_xor", + "llvm.hexagon.V6.veqh.xor.128B" => "__builtin_HEXAGON_V6_veqh_xor_128B", + "llvm.hexagon.V6.veqw" => "__builtin_HEXAGON_V6_veqw", + "llvm.hexagon.V6.veqw.128B" => "__builtin_HEXAGON_V6_veqw_128B", + "llvm.hexagon.V6.veqw.and" => "__builtin_HEXAGON_V6_veqw_and", + "llvm.hexagon.V6.veqw.and.128B" => "__builtin_HEXAGON_V6_veqw_and_128B", + "llvm.hexagon.V6.veqw.or" => "__builtin_HEXAGON_V6_veqw_or", + "llvm.hexagon.V6.veqw.or.128B" => "__builtin_HEXAGON_V6_veqw_or_128B", + "llvm.hexagon.V6.veqw.xor" => "__builtin_HEXAGON_V6_veqw_xor", + "llvm.hexagon.V6.veqw.xor.128B" => "__builtin_HEXAGON_V6_veqw_xor_128B", + "llvm.hexagon.V6.vfmax.hf" => "__builtin_HEXAGON_V6_vfmax_hf", + "llvm.hexagon.V6.vfmax.hf.128B" => "__builtin_HEXAGON_V6_vfmax_hf_128B", + "llvm.hexagon.V6.vfmax.sf" => "__builtin_HEXAGON_V6_vfmax_sf", + "llvm.hexagon.V6.vfmax.sf.128B" => "__builtin_HEXAGON_V6_vfmax_sf_128B", + "llvm.hexagon.V6.vfmin.hf" => "__builtin_HEXAGON_V6_vfmin_hf", + "llvm.hexagon.V6.vfmin.hf.128B" => "__builtin_HEXAGON_V6_vfmin_hf_128B", + "llvm.hexagon.V6.vfmin.sf" => "__builtin_HEXAGON_V6_vfmin_sf", + "llvm.hexagon.V6.vfmin.sf.128B" => "__builtin_HEXAGON_V6_vfmin_sf_128B", + "llvm.hexagon.V6.vfneg.hf" => "__builtin_HEXAGON_V6_vfneg_hf", + "llvm.hexagon.V6.vfneg.hf.128B" => "__builtin_HEXAGON_V6_vfneg_hf_128B", + 
"llvm.hexagon.V6.vfneg.sf" => "__builtin_HEXAGON_V6_vfneg_sf", + "llvm.hexagon.V6.vfneg.sf.128B" => "__builtin_HEXAGON_V6_vfneg_sf_128B", + "llvm.hexagon.V6.vgathermh" => "__builtin_HEXAGON_V6_vgathermh", + "llvm.hexagon.V6.vgathermh.128B" => "__builtin_HEXAGON_V6_vgathermh_128B", + "llvm.hexagon.V6.vgathermhq" => "__builtin_HEXAGON_V6_vgathermhq", + "llvm.hexagon.V6.vgathermhq.128B" => "__builtin_HEXAGON_V6_vgathermhq_128B", + "llvm.hexagon.V6.vgathermhw" => "__builtin_HEXAGON_V6_vgathermhw", + "llvm.hexagon.V6.vgathermhw.128B" => "__builtin_HEXAGON_V6_vgathermhw_128B", + "llvm.hexagon.V6.vgathermhwq" => "__builtin_HEXAGON_V6_vgathermhwq", + "llvm.hexagon.V6.vgathermhwq.128B" => "__builtin_HEXAGON_V6_vgathermhwq_128B", + "llvm.hexagon.V6.vgathermw" => "__builtin_HEXAGON_V6_vgathermw", + "llvm.hexagon.V6.vgathermw.128B" => "__builtin_HEXAGON_V6_vgathermw_128B", + "llvm.hexagon.V6.vgathermwq" => "__builtin_HEXAGON_V6_vgathermwq", + "llvm.hexagon.V6.vgathermwq.128B" => "__builtin_HEXAGON_V6_vgathermwq_128B", + "llvm.hexagon.V6.vgtb" => "__builtin_HEXAGON_V6_vgtb", + "llvm.hexagon.V6.vgtb.128B" => "__builtin_HEXAGON_V6_vgtb_128B", + "llvm.hexagon.V6.vgtb.and" => "__builtin_HEXAGON_V6_vgtb_and", + "llvm.hexagon.V6.vgtb.and.128B" => "__builtin_HEXAGON_V6_vgtb_and_128B", + "llvm.hexagon.V6.vgtb.or" => "__builtin_HEXAGON_V6_vgtb_or", + "llvm.hexagon.V6.vgtb.or.128B" => "__builtin_HEXAGON_V6_vgtb_or_128B", + "llvm.hexagon.V6.vgtb.xor" => "__builtin_HEXAGON_V6_vgtb_xor", + "llvm.hexagon.V6.vgtb.xor.128B" => "__builtin_HEXAGON_V6_vgtb_xor_128B", + "llvm.hexagon.V6.vgtbf" => "__builtin_HEXAGON_V6_vgtbf", + "llvm.hexagon.V6.vgtbf.128B" => "__builtin_HEXAGON_V6_vgtbf_128B", + "llvm.hexagon.V6.vgtbf.and" => "__builtin_HEXAGON_V6_vgtbf_and", + "llvm.hexagon.V6.vgtbf.and.128B" => "__builtin_HEXAGON_V6_vgtbf_and_128B", + "llvm.hexagon.V6.vgtbf.or" => "__builtin_HEXAGON_V6_vgtbf_or", + "llvm.hexagon.V6.vgtbf.or.128B" => "__builtin_HEXAGON_V6_vgtbf_or_128B", + "llvm.hexagon.V6.vgtbf.xor" => "__builtin_HEXAGON_V6_vgtbf_xor", + "llvm.hexagon.V6.vgtbf.xor.128B" => "__builtin_HEXAGON_V6_vgtbf_xor_128B", + "llvm.hexagon.V6.vgth" => "__builtin_HEXAGON_V6_vgth", + "llvm.hexagon.V6.vgth.128B" => "__builtin_HEXAGON_V6_vgth_128B", + "llvm.hexagon.V6.vgth.and" => "__builtin_HEXAGON_V6_vgth_and", + "llvm.hexagon.V6.vgth.and.128B" => "__builtin_HEXAGON_V6_vgth_and_128B", + "llvm.hexagon.V6.vgth.or" => "__builtin_HEXAGON_V6_vgth_or", + "llvm.hexagon.V6.vgth.or.128B" => "__builtin_HEXAGON_V6_vgth_or_128B", + "llvm.hexagon.V6.vgth.xor" => "__builtin_HEXAGON_V6_vgth_xor", + "llvm.hexagon.V6.vgth.xor.128B" => "__builtin_HEXAGON_V6_vgth_xor_128B", + "llvm.hexagon.V6.vgthf" => "__builtin_HEXAGON_V6_vgthf", + "llvm.hexagon.V6.vgthf.128B" => "__builtin_HEXAGON_V6_vgthf_128B", + "llvm.hexagon.V6.vgthf.and" => "__builtin_HEXAGON_V6_vgthf_and", + "llvm.hexagon.V6.vgthf.and.128B" => "__builtin_HEXAGON_V6_vgthf_and_128B", + "llvm.hexagon.V6.vgthf.or" => "__builtin_HEXAGON_V6_vgthf_or", + "llvm.hexagon.V6.vgthf.or.128B" => "__builtin_HEXAGON_V6_vgthf_or_128B", + "llvm.hexagon.V6.vgthf.xor" => "__builtin_HEXAGON_V6_vgthf_xor", + "llvm.hexagon.V6.vgthf.xor.128B" => "__builtin_HEXAGON_V6_vgthf_xor_128B", + "llvm.hexagon.V6.vgtsf" => "__builtin_HEXAGON_V6_vgtsf", + "llvm.hexagon.V6.vgtsf.128B" => "__builtin_HEXAGON_V6_vgtsf_128B", + "llvm.hexagon.V6.vgtsf.and" => "__builtin_HEXAGON_V6_vgtsf_and", + "llvm.hexagon.V6.vgtsf.and.128B" => "__builtin_HEXAGON_V6_vgtsf_and_128B", + "llvm.hexagon.V6.vgtsf.or" => "__builtin_HEXAGON_V6_vgtsf_or", + 
"llvm.hexagon.V6.vgtsf.or.128B" => "__builtin_HEXAGON_V6_vgtsf_or_128B", + "llvm.hexagon.V6.vgtsf.xor" => "__builtin_HEXAGON_V6_vgtsf_xor", + "llvm.hexagon.V6.vgtsf.xor.128B" => "__builtin_HEXAGON_V6_vgtsf_xor_128B", + "llvm.hexagon.V6.vgtub" => "__builtin_HEXAGON_V6_vgtub", + "llvm.hexagon.V6.vgtub.128B" => "__builtin_HEXAGON_V6_vgtub_128B", + "llvm.hexagon.V6.vgtub.and" => "__builtin_HEXAGON_V6_vgtub_and", + "llvm.hexagon.V6.vgtub.and.128B" => "__builtin_HEXAGON_V6_vgtub_and_128B", + "llvm.hexagon.V6.vgtub.or" => "__builtin_HEXAGON_V6_vgtub_or", + "llvm.hexagon.V6.vgtub.or.128B" => "__builtin_HEXAGON_V6_vgtub_or_128B", + "llvm.hexagon.V6.vgtub.xor" => "__builtin_HEXAGON_V6_vgtub_xor", + "llvm.hexagon.V6.vgtub.xor.128B" => "__builtin_HEXAGON_V6_vgtub_xor_128B", + "llvm.hexagon.V6.vgtuh" => "__builtin_HEXAGON_V6_vgtuh", + "llvm.hexagon.V6.vgtuh.128B" => "__builtin_HEXAGON_V6_vgtuh_128B", + "llvm.hexagon.V6.vgtuh.and" => "__builtin_HEXAGON_V6_vgtuh_and", + "llvm.hexagon.V6.vgtuh.and.128B" => "__builtin_HEXAGON_V6_vgtuh_and_128B", + "llvm.hexagon.V6.vgtuh.or" => "__builtin_HEXAGON_V6_vgtuh_or", + "llvm.hexagon.V6.vgtuh.or.128B" => "__builtin_HEXAGON_V6_vgtuh_or_128B", + "llvm.hexagon.V6.vgtuh.xor" => "__builtin_HEXAGON_V6_vgtuh_xor", + "llvm.hexagon.V6.vgtuh.xor.128B" => "__builtin_HEXAGON_V6_vgtuh_xor_128B", + "llvm.hexagon.V6.vgtuw" => "__builtin_HEXAGON_V6_vgtuw", + "llvm.hexagon.V6.vgtuw.128B" => "__builtin_HEXAGON_V6_vgtuw_128B", + "llvm.hexagon.V6.vgtuw.and" => "__builtin_HEXAGON_V6_vgtuw_and", + "llvm.hexagon.V6.vgtuw.and.128B" => "__builtin_HEXAGON_V6_vgtuw_and_128B", + "llvm.hexagon.V6.vgtuw.or" => "__builtin_HEXAGON_V6_vgtuw_or", + "llvm.hexagon.V6.vgtuw.or.128B" => "__builtin_HEXAGON_V6_vgtuw_or_128B", + "llvm.hexagon.V6.vgtuw.xor" => "__builtin_HEXAGON_V6_vgtuw_xor", + "llvm.hexagon.V6.vgtuw.xor.128B" => "__builtin_HEXAGON_V6_vgtuw_xor_128B", + "llvm.hexagon.V6.vgtw" => "__builtin_HEXAGON_V6_vgtw", + "llvm.hexagon.V6.vgtw.128B" => "__builtin_HEXAGON_V6_vgtw_128B", + "llvm.hexagon.V6.vgtw.and" => "__builtin_HEXAGON_V6_vgtw_and", + "llvm.hexagon.V6.vgtw.and.128B" => "__builtin_HEXAGON_V6_vgtw_and_128B", + "llvm.hexagon.V6.vgtw.or" => "__builtin_HEXAGON_V6_vgtw_or", + "llvm.hexagon.V6.vgtw.or.128B" => "__builtin_HEXAGON_V6_vgtw_or_128B", + "llvm.hexagon.V6.vgtw.xor" => "__builtin_HEXAGON_V6_vgtw_xor", + "llvm.hexagon.V6.vgtw.xor.128B" => "__builtin_HEXAGON_V6_vgtw_xor_128B", + "llvm.hexagon.V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr", + "llvm.hexagon.V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B", + "llvm.hexagon.V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb", + "llvm.hexagon.V6.vlalignb.128B" => "__builtin_HEXAGON_V6_vlalignb_128B", + "llvm.hexagon.V6.vlalignbi" => "__builtin_HEXAGON_V6_vlalignbi", + "llvm.hexagon.V6.vlalignbi.128B" => "__builtin_HEXAGON_V6_vlalignbi_128B", + "llvm.hexagon.V6.vlsrb" => "__builtin_HEXAGON_V6_vlsrb", + "llvm.hexagon.V6.vlsrb.128B" => "__builtin_HEXAGON_V6_vlsrb_128B", + "llvm.hexagon.V6.vlsrh" => "__builtin_HEXAGON_V6_vlsrh", + "llvm.hexagon.V6.vlsrh.128B" => "__builtin_HEXAGON_V6_vlsrh_128B", + "llvm.hexagon.V6.vlsrhv" => "__builtin_HEXAGON_V6_vlsrhv", + "llvm.hexagon.V6.vlsrhv.128B" => "__builtin_HEXAGON_V6_vlsrhv_128B", + "llvm.hexagon.V6.vlsrw" => "__builtin_HEXAGON_V6_vlsrw", + "llvm.hexagon.V6.vlsrw.128B" => "__builtin_HEXAGON_V6_vlsrw_128B", + "llvm.hexagon.V6.vlsrwv" => "__builtin_HEXAGON_V6_vlsrwv", + "llvm.hexagon.V6.vlsrwv.128B" => "__builtin_HEXAGON_V6_vlsrwv_128B", + "llvm.hexagon.V6.vlut4" => 
"__builtin_HEXAGON_V6_vlut4", + "llvm.hexagon.V6.vlut4.128B" => "__builtin_HEXAGON_V6_vlut4_128B", + "llvm.hexagon.V6.vlutb" => "__builtin_HEXAGON_V6_vlutb", + "llvm.hexagon.V6.vlutb.128B" => "__builtin_HEXAGON_V6_vlutb_128B", + "llvm.hexagon.V6.vlutb.acc" => "__builtin_HEXAGON_V6_vlutb_acc", + "llvm.hexagon.V6.vlutb.acc.128B" => "__builtin_HEXAGON_V6_vlutb_acc_128B", + "llvm.hexagon.V6.vlutb.dv" => "__builtin_HEXAGON_V6_vlutb_dv", + "llvm.hexagon.V6.vlutb.dv.128B" => "__builtin_HEXAGON_V6_vlutb_dv_128B", + "llvm.hexagon.V6.vlutb.dv.acc" => "__builtin_HEXAGON_V6_vlutb_dv_acc", + "llvm.hexagon.V6.vlutb.dv.acc.128B" => "__builtin_HEXAGON_V6_vlutb_dv_acc_128B", + "llvm.hexagon.V6.vlutvvb" => "__builtin_HEXAGON_V6_vlutvvb", + "llvm.hexagon.V6.vlutvvb.128B" => "__builtin_HEXAGON_V6_vlutvvb_128B", + "llvm.hexagon.V6.vlutvvb.nm" => "__builtin_HEXAGON_V6_vlutvvb_nm", + "llvm.hexagon.V6.vlutvvb.nm.128B" => "__builtin_HEXAGON_V6_vlutvvb_nm_128B", + "llvm.hexagon.V6.vlutvvb.oracc" => "__builtin_HEXAGON_V6_vlutvvb_oracc", + "llvm.hexagon.V6.vlutvvb.oracc.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracc_128B", + "llvm.hexagon.V6.vlutvvb.oracci" => "__builtin_HEXAGON_V6_vlutvvb_oracci", + "llvm.hexagon.V6.vlutvvb.oracci.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracci_128B", + "llvm.hexagon.V6.vlutvvbi" => "__builtin_HEXAGON_V6_vlutvvbi", + "llvm.hexagon.V6.vlutvvbi.128B" => "__builtin_HEXAGON_V6_vlutvvbi_128B", + "llvm.hexagon.V6.vlutvwh" => "__builtin_HEXAGON_V6_vlutvwh", + "llvm.hexagon.V6.vlutvwh.128B" => "__builtin_HEXAGON_V6_vlutvwh_128B", + "llvm.hexagon.V6.vlutvwh.nm" => "__builtin_HEXAGON_V6_vlutvwh_nm", + "llvm.hexagon.V6.vlutvwh.nm.128B" => "__builtin_HEXAGON_V6_vlutvwh_nm_128B", + "llvm.hexagon.V6.vlutvwh.oracc" => "__builtin_HEXAGON_V6_vlutvwh_oracc", + "llvm.hexagon.V6.vlutvwh.oracc.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracc_128B", + "llvm.hexagon.V6.vlutvwh.oracci" => "__builtin_HEXAGON_V6_vlutvwh_oracci", + "llvm.hexagon.V6.vlutvwh.oracci.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracci_128B", + "llvm.hexagon.V6.vlutvwhi" => "__builtin_HEXAGON_V6_vlutvwhi", + "llvm.hexagon.V6.vlutvwhi.128B" => "__builtin_HEXAGON_V6_vlutvwhi_128B", + "llvm.hexagon.V6.vmax.bf" => "__builtin_HEXAGON_V6_vmax_bf", + "llvm.hexagon.V6.vmax.bf.128B" => "__builtin_HEXAGON_V6_vmax_bf_128B", + "llvm.hexagon.V6.vmax.hf" => "__builtin_HEXAGON_V6_vmax_hf", + "llvm.hexagon.V6.vmax.hf.128B" => "__builtin_HEXAGON_V6_vmax_hf_128B", + "llvm.hexagon.V6.vmax.sf" => "__builtin_HEXAGON_V6_vmax_sf", + "llvm.hexagon.V6.vmax.sf.128B" => "__builtin_HEXAGON_V6_vmax_sf_128B", + "llvm.hexagon.V6.vmaxb" => "__builtin_HEXAGON_V6_vmaxb", + "llvm.hexagon.V6.vmaxb.128B" => "__builtin_HEXAGON_V6_vmaxb_128B", + "llvm.hexagon.V6.vmaxh" => "__builtin_HEXAGON_V6_vmaxh", + "llvm.hexagon.V6.vmaxh.128B" => "__builtin_HEXAGON_V6_vmaxh_128B", + "llvm.hexagon.V6.vmaxub" => "__builtin_HEXAGON_V6_vmaxub", + "llvm.hexagon.V6.vmaxub.128B" => "__builtin_HEXAGON_V6_vmaxub_128B", + "llvm.hexagon.V6.vmaxuh" => "__builtin_HEXAGON_V6_vmaxuh", + "llvm.hexagon.V6.vmaxuh.128B" => "__builtin_HEXAGON_V6_vmaxuh_128B", + "llvm.hexagon.V6.vmaxw" => "__builtin_HEXAGON_V6_vmaxw", + "llvm.hexagon.V6.vmaxw.128B" => "__builtin_HEXAGON_V6_vmaxw_128B", + "llvm.hexagon.V6.vmin.bf" => "__builtin_HEXAGON_V6_vmin_bf", + "llvm.hexagon.V6.vmin.bf.128B" => "__builtin_HEXAGON_V6_vmin_bf_128B", + "llvm.hexagon.V6.vmin.hf" => "__builtin_HEXAGON_V6_vmin_hf", + "llvm.hexagon.V6.vmin.hf.128B" => "__builtin_HEXAGON_V6_vmin_hf_128B", + "llvm.hexagon.V6.vmin.sf" => "__builtin_HEXAGON_V6_vmin_sf", 
+ "llvm.hexagon.V6.vmin.sf.128B" => "__builtin_HEXAGON_V6_vmin_sf_128B", + "llvm.hexagon.V6.vminb" => "__builtin_HEXAGON_V6_vminb", + "llvm.hexagon.V6.vminb.128B" => "__builtin_HEXAGON_V6_vminb_128B", + "llvm.hexagon.V6.vminh" => "__builtin_HEXAGON_V6_vminh", + "llvm.hexagon.V6.vminh.128B" => "__builtin_HEXAGON_V6_vminh_128B", + "llvm.hexagon.V6.vminub" => "__builtin_HEXAGON_V6_vminub", + "llvm.hexagon.V6.vminub.128B" => "__builtin_HEXAGON_V6_vminub_128B", + "llvm.hexagon.V6.vminuh" => "__builtin_HEXAGON_V6_vminuh", + "llvm.hexagon.V6.vminuh.128B" => "__builtin_HEXAGON_V6_vminuh_128B", + "llvm.hexagon.V6.vminw" => "__builtin_HEXAGON_V6_vminw", + "llvm.hexagon.V6.vminw.128B" => "__builtin_HEXAGON_V6_vminw_128B", + "llvm.hexagon.V6.vmpabus" => "__builtin_HEXAGON_V6_vmpabus", + "llvm.hexagon.V6.vmpabus.128B" => "__builtin_HEXAGON_V6_vmpabus_128B", + "llvm.hexagon.V6.vmpabus.acc" => "__builtin_HEXAGON_V6_vmpabus_acc", + "llvm.hexagon.V6.vmpabus.acc.128B" => "__builtin_HEXAGON_V6_vmpabus_acc_128B", + "llvm.hexagon.V6.vmpabusv" => "__builtin_HEXAGON_V6_vmpabusv", + "llvm.hexagon.V6.vmpabusv.128B" => "__builtin_HEXAGON_V6_vmpabusv_128B", + "llvm.hexagon.V6.vmpabuu" => "__builtin_HEXAGON_V6_vmpabuu", + "llvm.hexagon.V6.vmpabuu.128B" => "__builtin_HEXAGON_V6_vmpabuu_128B", + "llvm.hexagon.V6.vmpabuu.acc" => "__builtin_HEXAGON_V6_vmpabuu_acc", + "llvm.hexagon.V6.vmpabuu.acc.128B" => "__builtin_HEXAGON_V6_vmpabuu_acc_128B", + "llvm.hexagon.V6.vmpabuuv" => "__builtin_HEXAGON_V6_vmpabuuv", + "llvm.hexagon.V6.vmpabuuv.128B" => "__builtin_HEXAGON_V6_vmpabuuv_128B", + "llvm.hexagon.V6.vmpahb" => "__builtin_HEXAGON_V6_vmpahb", + "llvm.hexagon.V6.vmpahb.128B" => "__builtin_HEXAGON_V6_vmpahb_128B", + "llvm.hexagon.V6.vmpahb.acc" => "__builtin_HEXAGON_V6_vmpahb_acc", + "llvm.hexagon.V6.vmpahb.acc.128B" => "__builtin_HEXAGON_V6_vmpahb_acc_128B", + "llvm.hexagon.V6.vmpahhsat" => "__builtin_HEXAGON_V6_vmpahhsat", + "llvm.hexagon.V6.vmpahhsat.128B" => "__builtin_HEXAGON_V6_vmpahhsat_128B", + "llvm.hexagon.V6.vmpauhb" => "__builtin_HEXAGON_V6_vmpauhb", + "llvm.hexagon.V6.vmpauhb.128B" => "__builtin_HEXAGON_V6_vmpauhb_128B", + "llvm.hexagon.V6.vmpauhb.acc" => "__builtin_HEXAGON_V6_vmpauhb_acc", + "llvm.hexagon.V6.vmpauhb.acc.128B" => "__builtin_HEXAGON_V6_vmpauhb_acc_128B", + "llvm.hexagon.V6.vmpauhuhsat" => "__builtin_HEXAGON_V6_vmpauhuhsat", + "llvm.hexagon.V6.vmpauhuhsat.128B" => "__builtin_HEXAGON_V6_vmpauhuhsat_128B", + "llvm.hexagon.V6.vmpsuhuhsat" => "__builtin_HEXAGON_V6_vmpsuhuhsat", + "llvm.hexagon.V6.vmpsuhuhsat.128B" => "__builtin_HEXAGON_V6_vmpsuhuhsat_128B", + "llvm.hexagon.V6.vmpy.hf.hf" => "__builtin_HEXAGON_V6_vmpy_hf_hf", + "llvm.hexagon.V6.vmpy.hf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_128B", + "llvm.hexagon.V6.vmpy.hf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc", + "llvm.hexagon.V6.vmpy.hf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc_128B", + "llvm.hexagon.V6.vmpy.qf16" => "__builtin_HEXAGON_V6_vmpy_qf16", + "llvm.hexagon.V6.vmpy.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_128B", + "llvm.hexagon.V6.vmpy.qf16.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_hf", + "llvm.hexagon.V6.vmpy.qf16.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_hf_128B", + "llvm.hexagon.V6.vmpy.qf16.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf", + "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf_128B", + "llvm.hexagon.V6.vmpy.qf32" => "__builtin_HEXAGON_V6_vmpy_qf32", + "llvm.hexagon.V6.vmpy.qf32.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_128B", + "llvm.hexagon.V6.vmpy.qf32.hf" 
=> "__builtin_HEXAGON_V6_vmpy_qf32_hf", + "llvm.hexagon.V6.vmpy.qf32.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_hf_128B", + "llvm.hexagon.V6.vmpy.qf32.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf", + "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf_128B", + "llvm.hexagon.V6.vmpy.qf32.qf16" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16", + "llvm.hexagon.V6.vmpy.qf32.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16_128B", + "llvm.hexagon.V6.vmpy.qf32.sf" => "__builtin_HEXAGON_V6_vmpy_qf32_sf", + "llvm.hexagon.V6.vmpy.qf32.sf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_sf_128B", + "llvm.hexagon.V6.vmpy.sf.bf" => "__builtin_HEXAGON_V6_vmpy_sf_bf", + "llvm.hexagon.V6.vmpy.sf.bf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_128B", + "llvm.hexagon.V6.vmpy.sf.bf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc", + "llvm.hexagon.V6.vmpy.sf.bf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc_128B", + "llvm.hexagon.V6.vmpy.sf.hf" => "__builtin_HEXAGON_V6_vmpy_sf_hf", + "llvm.hexagon.V6.vmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_128B", + "llvm.hexagon.V6.vmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc", + "llvm.hexagon.V6.vmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc_128B", + "llvm.hexagon.V6.vmpy.sf.sf" => "__builtin_HEXAGON_V6_vmpy_sf_sf", + "llvm.hexagon.V6.vmpy.sf.sf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_sf_128B", + "llvm.hexagon.V6.vmpybus" => "__builtin_HEXAGON_V6_vmpybus", + "llvm.hexagon.V6.vmpybus.128B" => "__builtin_HEXAGON_V6_vmpybus_128B", + "llvm.hexagon.V6.vmpybus.acc" => "__builtin_HEXAGON_V6_vmpybus_acc", + "llvm.hexagon.V6.vmpybus.acc.128B" => "__builtin_HEXAGON_V6_vmpybus_acc_128B", + "llvm.hexagon.V6.vmpybusv" => "__builtin_HEXAGON_V6_vmpybusv", + "llvm.hexagon.V6.vmpybusv.128B" => "__builtin_HEXAGON_V6_vmpybusv_128B", + "llvm.hexagon.V6.vmpybusv.acc" => "__builtin_HEXAGON_V6_vmpybusv_acc", + "llvm.hexagon.V6.vmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vmpybusv_acc_128B", + "llvm.hexagon.V6.vmpybv" => "__builtin_HEXAGON_V6_vmpybv", + "llvm.hexagon.V6.vmpybv.128B" => "__builtin_HEXAGON_V6_vmpybv_128B", + "llvm.hexagon.V6.vmpybv.acc" => "__builtin_HEXAGON_V6_vmpybv_acc", + "llvm.hexagon.V6.vmpybv.acc.128B" => "__builtin_HEXAGON_V6_vmpybv_acc_128B", + "llvm.hexagon.V6.vmpyewuh" => "__builtin_HEXAGON_V6_vmpyewuh", + "llvm.hexagon.V6.vmpyewuh.128B" => "__builtin_HEXAGON_V6_vmpyewuh_128B", + "llvm.hexagon.V6.vmpyewuh.64" => "__builtin_HEXAGON_V6_vmpyewuh_64", + "llvm.hexagon.V6.vmpyewuh.64.128B" => "__builtin_HEXAGON_V6_vmpyewuh_64_128B", + "llvm.hexagon.V6.vmpyh" => "__builtin_HEXAGON_V6_vmpyh", + "llvm.hexagon.V6.vmpyh.128B" => "__builtin_HEXAGON_V6_vmpyh_128B", + "llvm.hexagon.V6.vmpyh.acc" => "__builtin_HEXAGON_V6_vmpyh_acc", + "llvm.hexagon.V6.vmpyh.acc.128B" => "__builtin_HEXAGON_V6_vmpyh_acc_128B", + "llvm.hexagon.V6.vmpyhsat.acc" => "__builtin_HEXAGON_V6_vmpyhsat_acc", + "llvm.hexagon.V6.vmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vmpyhsat_acc_128B", + "llvm.hexagon.V6.vmpyhsrs" => "__builtin_HEXAGON_V6_vmpyhsrs", + "llvm.hexagon.V6.vmpyhsrs.128B" => "__builtin_HEXAGON_V6_vmpyhsrs_128B", + "llvm.hexagon.V6.vmpyhss" => "__builtin_HEXAGON_V6_vmpyhss", + "llvm.hexagon.V6.vmpyhss.128B" => "__builtin_HEXAGON_V6_vmpyhss_128B", + "llvm.hexagon.V6.vmpyhus" => "__builtin_HEXAGON_V6_vmpyhus", + "llvm.hexagon.V6.vmpyhus.128B" => "__builtin_HEXAGON_V6_vmpyhus_128B", + "llvm.hexagon.V6.vmpyhus.acc" => "__builtin_HEXAGON_V6_vmpyhus_acc", + "llvm.hexagon.V6.vmpyhus.acc.128B" => "__builtin_HEXAGON_V6_vmpyhus_acc_128B", + 
"llvm.hexagon.V6.vmpyhv" => "__builtin_HEXAGON_V6_vmpyhv", + "llvm.hexagon.V6.vmpyhv.128B" => "__builtin_HEXAGON_V6_vmpyhv_128B", + "llvm.hexagon.V6.vmpyhv.acc" => "__builtin_HEXAGON_V6_vmpyhv_acc", + "llvm.hexagon.V6.vmpyhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyhv_acc_128B", + "llvm.hexagon.V6.vmpyhvsrs" => "__builtin_HEXAGON_V6_vmpyhvsrs", + "llvm.hexagon.V6.vmpyhvsrs.128B" => "__builtin_HEXAGON_V6_vmpyhvsrs_128B", + "llvm.hexagon.V6.vmpyieoh" => "__builtin_HEXAGON_V6_vmpyieoh", + "llvm.hexagon.V6.vmpyieoh.128B" => "__builtin_HEXAGON_V6_vmpyieoh_128B", + "llvm.hexagon.V6.vmpyiewh.acc" => "__builtin_HEXAGON_V6_vmpyiewh_acc", + "llvm.hexagon.V6.vmpyiewh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewh_acc_128B", + "llvm.hexagon.V6.vmpyiewuh" => "__builtin_HEXAGON_V6_vmpyiewuh", + "llvm.hexagon.V6.vmpyiewuh.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_128B", + "llvm.hexagon.V6.vmpyiewuh.acc" => "__builtin_HEXAGON_V6_vmpyiewuh_acc", + "llvm.hexagon.V6.vmpyiewuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_acc_128B", + "llvm.hexagon.V6.vmpyih" => "__builtin_HEXAGON_V6_vmpyih", + "llvm.hexagon.V6.vmpyih.128B" => "__builtin_HEXAGON_V6_vmpyih_128B", + "llvm.hexagon.V6.vmpyih.acc" => "__builtin_HEXAGON_V6_vmpyih_acc", + "llvm.hexagon.V6.vmpyih.acc.128B" => "__builtin_HEXAGON_V6_vmpyih_acc_128B", + "llvm.hexagon.V6.vmpyihb" => "__builtin_HEXAGON_V6_vmpyihb", + "llvm.hexagon.V6.vmpyihb.128B" => "__builtin_HEXAGON_V6_vmpyihb_128B", + "llvm.hexagon.V6.vmpyihb.acc" => "__builtin_HEXAGON_V6_vmpyihb_acc", + "llvm.hexagon.V6.vmpyihb.acc.128B" => "__builtin_HEXAGON_V6_vmpyihb_acc_128B", + "llvm.hexagon.V6.vmpyiowh" => "__builtin_HEXAGON_V6_vmpyiowh", + "llvm.hexagon.V6.vmpyiowh.128B" => "__builtin_HEXAGON_V6_vmpyiowh_128B", + "llvm.hexagon.V6.vmpyiwb" => "__builtin_HEXAGON_V6_vmpyiwb", + "llvm.hexagon.V6.vmpyiwb.128B" => "__builtin_HEXAGON_V6_vmpyiwb_128B", + "llvm.hexagon.V6.vmpyiwb.acc" => "__builtin_HEXAGON_V6_vmpyiwb_acc", + "llvm.hexagon.V6.vmpyiwb.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwb_acc_128B", + "llvm.hexagon.V6.vmpyiwh" => "__builtin_HEXAGON_V6_vmpyiwh", + "llvm.hexagon.V6.vmpyiwh.128B" => "__builtin_HEXAGON_V6_vmpyiwh_128B", + "llvm.hexagon.V6.vmpyiwh.acc" => "__builtin_HEXAGON_V6_vmpyiwh_acc", + "llvm.hexagon.V6.vmpyiwh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwh_acc_128B", + "llvm.hexagon.V6.vmpyiwub" => "__builtin_HEXAGON_V6_vmpyiwub", + "llvm.hexagon.V6.vmpyiwub.128B" => "__builtin_HEXAGON_V6_vmpyiwub_128B", + "llvm.hexagon.V6.vmpyiwub.acc" => "__builtin_HEXAGON_V6_vmpyiwub_acc", + "llvm.hexagon.V6.vmpyiwub.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwub_acc_128B", + "llvm.hexagon.V6.vmpyowh" => "__builtin_HEXAGON_V6_vmpyowh", + "llvm.hexagon.V6.vmpyowh.128B" => "__builtin_HEXAGON_V6_vmpyowh_128B", + "llvm.hexagon.V6.vmpyowh.64.acc" => "__builtin_HEXAGON_V6_vmpyowh_64_acc", + "llvm.hexagon.V6.vmpyowh.64.acc.128B" => "__builtin_HEXAGON_V6_vmpyowh_64_acc_128B", + "llvm.hexagon.V6.vmpyowh.rnd" => "__builtin_HEXAGON_V6_vmpyowh_rnd", + "llvm.hexagon.V6.vmpyowh.rnd.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_128B", + "llvm.hexagon.V6.vmpyowh.rnd.sacc" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc", + "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B", + "llvm.hexagon.V6.vmpyowh.sacc" => "__builtin_HEXAGON_V6_vmpyowh_sacc", + "llvm.hexagon.V6.vmpyowh.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_sacc_128B", + "llvm.hexagon.V6.vmpyub" => "__builtin_HEXAGON_V6_vmpyub", + "llvm.hexagon.V6.vmpyub.128B" => "__builtin_HEXAGON_V6_vmpyub_128B", + "llvm.hexagon.V6.vmpyub.acc" => 
"__builtin_HEXAGON_V6_vmpyub_acc", + "llvm.hexagon.V6.vmpyub.acc.128B" => "__builtin_HEXAGON_V6_vmpyub_acc_128B", + "llvm.hexagon.V6.vmpyubv" => "__builtin_HEXAGON_V6_vmpyubv", + "llvm.hexagon.V6.vmpyubv.128B" => "__builtin_HEXAGON_V6_vmpyubv_128B", + "llvm.hexagon.V6.vmpyubv.acc" => "__builtin_HEXAGON_V6_vmpyubv_acc", + "llvm.hexagon.V6.vmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vmpyubv_acc_128B", + "llvm.hexagon.V6.vmpyuh" => "__builtin_HEXAGON_V6_vmpyuh", + "llvm.hexagon.V6.vmpyuh.128B" => "__builtin_HEXAGON_V6_vmpyuh_128B", + "llvm.hexagon.V6.vmpyuh.acc" => "__builtin_HEXAGON_V6_vmpyuh_acc", + "llvm.hexagon.V6.vmpyuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyuh_acc_128B", + "llvm.hexagon.V6.vmpyuhe" => "__builtin_HEXAGON_V6_vmpyuhe", + "llvm.hexagon.V6.vmpyuhe.128B" => "__builtin_HEXAGON_V6_vmpyuhe_128B", + "llvm.hexagon.V6.vmpyuhe.acc" => "__builtin_HEXAGON_V6_vmpyuhe_acc", + "llvm.hexagon.V6.vmpyuhe.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhe_acc_128B", + "llvm.hexagon.V6.vmpyuhv" => "__builtin_HEXAGON_V6_vmpyuhv", + "llvm.hexagon.V6.vmpyuhv.128B" => "__builtin_HEXAGON_V6_vmpyuhv_128B", + "llvm.hexagon.V6.vmpyuhv.acc" => "__builtin_HEXAGON_V6_vmpyuhv_acc", + "llvm.hexagon.V6.vmpyuhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhv_acc_128B", + "llvm.hexagon.V6.vmpyuhvs" => "__builtin_HEXAGON_V6_vmpyuhvs", + "llvm.hexagon.V6.vmpyuhvs.128B" => "__builtin_HEXAGON_V6_vmpyuhvs_128B", + "llvm.hexagon.V6.vmux" => "__builtin_HEXAGON_V6_vmux", + "llvm.hexagon.V6.vmux.128B" => "__builtin_HEXAGON_V6_vmux_128B", + "llvm.hexagon.V6.vnavgb" => "__builtin_HEXAGON_V6_vnavgb", + "llvm.hexagon.V6.vnavgb.128B" => "__builtin_HEXAGON_V6_vnavgb_128B", + "llvm.hexagon.V6.vnavgh" => "__builtin_HEXAGON_V6_vnavgh", + "llvm.hexagon.V6.vnavgh.128B" => "__builtin_HEXAGON_V6_vnavgh_128B", + "llvm.hexagon.V6.vnavgub" => "__builtin_HEXAGON_V6_vnavgub", + "llvm.hexagon.V6.vnavgub.128B" => "__builtin_HEXAGON_V6_vnavgub_128B", + "llvm.hexagon.V6.vnavgw" => "__builtin_HEXAGON_V6_vnavgw", + "llvm.hexagon.V6.vnavgw.128B" => "__builtin_HEXAGON_V6_vnavgw_128B", + "llvm.hexagon.V6.vnormamth" => "__builtin_HEXAGON_V6_vnormamth", + "llvm.hexagon.V6.vnormamth.128B" => "__builtin_HEXAGON_V6_vnormamth_128B", + "llvm.hexagon.V6.vnormamtw" => "__builtin_HEXAGON_V6_vnormamtw", + "llvm.hexagon.V6.vnormamtw.128B" => "__builtin_HEXAGON_V6_vnormamtw_128B", + "llvm.hexagon.V6.vnot" => "__builtin_HEXAGON_V6_vnot", + "llvm.hexagon.V6.vnot.128B" => "__builtin_HEXAGON_V6_vnot_128B", + "llvm.hexagon.V6.vor" => "__builtin_HEXAGON_V6_vor", + "llvm.hexagon.V6.vor.128B" => "__builtin_HEXAGON_V6_vor_128B", + "llvm.hexagon.V6.vpackeb" => "__builtin_HEXAGON_V6_vpackeb", + "llvm.hexagon.V6.vpackeb.128B" => "__builtin_HEXAGON_V6_vpackeb_128B", + "llvm.hexagon.V6.vpackeh" => "__builtin_HEXAGON_V6_vpackeh", + "llvm.hexagon.V6.vpackeh.128B" => "__builtin_HEXAGON_V6_vpackeh_128B", + "llvm.hexagon.V6.vpackhb.sat" => "__builtin_HEXAGON_V6_vpackhb_sat", + "llvm.hexagon.V6.vpackhb.sat.128B" => "__builtin_HEXAGON_V6_vpackhb_sat_128B", + "llvm.hexagon.V6.vpackhub.sat" => "__builtin_HEXAGON_V6_vpackhub_sat", + "llvm.hexagon.V6.vpackhub.sat.128B" => "__builtin_HEXAGON_V6_vpackhub_sat_128B", + "llvm.hexagon.V6.vpackob" => "__builtin_HEXAGON_V6_vpackob", + "llvm.hexagon.V6.vpackob.128B" => "__builtin_HEXAGON_V6_vpackob_128B", + "llvm.hexagon.V6.vpackoh" => "__builtin_HEXAGON_V6_vpackoh", + "llvm.hexagon.V6.vpackoh.128B" => "__builtin_HEXAGON_V6_vpackoh_128B", + "llvm.hexagon.V6.vpackwh.sat" => "__builtin_HEXAGON_V6_vpackwh_sat", + "llvm.hexagon.V6.vpackwh.sat.128B" => 
"__builtin_HEXAGON_V6_vpackwh_sat_128B", + "llvm.hexagon.V6.vpackwuh.sat" => "__builtin_HEXAGON_V6_vpackwuh_sat", + "llvm.hexagon.V6.vpackwuh.sat.128B" => "__builtin_HEXAGON_V6_vpackwuh_sat_128B", + "llvm.hexagon.V6.vpopcounth" => "__builtin_HEXAGON_V6_vpopcounth", + "llvm.hexagon.V6.vpopcounth.128B" => "__builtin_HEXAGON_V6_vpopcounth_128B", + "llvm.hexagon.V6.vprefixqb" => "__builtin_HEXAGON_V6_vprefixqb", + "llvm.hexagon.V6.vprefixqb.128B" => "__builtin_HEXAGON_V6_vprefixqb_128B", + "llvm.hexagon.V6.vprefixqh" => "__builtin_HEXAGON_V6_vprefixqh", + "llvm.hexagon.V6.vprefixqh.128B" => "__builtin_HEXAGON_V6_vprefixqh_128B", + "llvm.hexagon.V6.vprefixqw" => "__builtin_HEXAGON_V6_vprefixqw", + "llvm.hexagon.V6.vprefixqw.128B" => "__builtin_HEXAGON_V6_vprefixqw_128B", + "llvm.hexagon.V6.vrdelta" => "__builtin_HEXAGON_V6_vrdelta", + "llvm.hexagon.V6.vrdelta.128B" => "__builtin_HEXAGON_V6_vrdelta_128B", + "llvm.hexagon.V6.vrmpybub.rtt" => "__builtin_HEXAGON_V6_vrmpybub_rtt", + "llvm.hexagon.V6.vrmpybub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_128B", + "llvm.hexagon.V6.vrmpybub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc", + "llvm.hexagon.V6.vrmpybub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B", + "llvm.hexagon.V6.vrmpybus" => "__builtin_HEXAGON_V6_vrmpybus", + "llvm.hexagon.V6.vrmpybus.128B" => "__builtin_HEXAGON_V6_vrmpybus_128B", + "llvm.hexagon.V6.vrmpybus.acc" => "__builtin_HEXAGON_V6_vrmpybus_acc", + "llvm.hexagon.V6.vrmpybus.acc.128B" => "__builtin_HEXAGON_V6_vrmpybus_acc_128B", + "llvm.hexagon.V6.vrmpybusi" => "__builtin_HEXAGON_V6_vrmpybusi", + "llvm.hexagon.V6.vrmpybusi.128B" => "__builtin_HEXAGON_V6_vrmpybusi_128B", + "llvm.hexagon.V6.vrmpybusi.acc" => "__builtin_HEXAGON_V6_vrmpybusi_acc", + "llvm.hexagon.V6.vrmpybusi.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusi_acc_128B", + "llvm.hexagon.V6.vrmpybusv" => "__builtin_HEXAGON_V6_vrmpybusv", + "llvm.hexagon.V6.vrmpybusv.128B" => "__builtin_HEXAGON_V6_vrmpybusv_128B", + "llvm.hexagon.V6.vrmpybusv.acc" => "__builtin_HEXAGON_V6_vrmpybusv_acc", + "llvm.hexagon.V6.vrmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusv_acc_128B", + "llvm.hexagon.V6.vrmpybv" => "__builtin_HEXAGON_V6_vrmpybv", + "llvm.hexagon.V6.vrmpybv.128B" => "__builtin_HEXAGON_V6_vrmpybv_128B", + "llvm.hexagon.V6.vrmpybv.acc" => "__builtin_HEXAGON_V6_vrmpybv_acc", + "llvm.hexagon.V6.vrmpybv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybv_acc_128B", + "llvm.hexagon.V6.vrmpyub" => "__builtin_HEXAGON_V6_vrmpyub", + "llvm.hexagon.V6.vrmpyub.128B" => "__builtin_HEXAGON_V6_vrmpyub_128B", + "llvm.hexagon.V6.vrmpyub.acc" => "__builtin_HEXAGON_V6_vrmpyub_acc", + "llvm.hexagon.V6.vrmpyub.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_acc_128B", + "llvm.hexagon.V6.vrmpyub.rtt" => "__builtin_HEXAGON_V6_vrmpyub_rtt", + "llvm.hexagon.V6.vrmpyub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_128B", + "llvm.hexagon.V6.vrmpyub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc", + "llvm.hexagon.V6.vrmpyub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B", + "llvm.hexagon.V6.vrmpyubi" => "__builtin_HEXAGON_V6_vrmpyubi", + "llvm.hexagon.V6.vrmpyubi.128B" => "__builtin_HEXAGON_V6_vrmpyubi_128B", + "llvm.hexagon.V6.vrmpyubi.acc" => "__builtin_HEXAGON_V6_vrmpyubi_acc", + "llvm.hexagon.V6.vrmpyubi.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubi_acc_128B", + "llvm.hexagon.V6.vrmpyubv" => "__builtin_HEXAGON_V6_vrmpyubv", + "llvm.hexagon.V6.vrmpyubv.128B" => "__builtin_HEXAGON_V6_vrmpyubv_128B", + "llvm.hexagon.V6.vrmpyubv.acc" => "__builtin_HEXAGON_V6_vrmpyubv_acc", + 
"llvm.hexagon.V6.vrmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubv_acc_128B", + "llvm.hexagon.V6.vror" => "__builtin_HEXAGON_V6_vror", + "llvm.hexagon.V6.vror.128B" => "__builtin_HEXAGON_V6_vror_128B", + "llvm.hexagon.V6.vrotr" => "__builtin_HEXAGON_V6_vrotr", + "llvm.hexagon.V6.vrotr.128B" => "__builtin_HEXAGON_V6_vrotr_128B", + "llvm.hexagon.V6.vroundhb" => "__builtin_HEXAGON_V6_vroundhb", + "llvm.hexagon.V6.vroundhb.128B" => "__builtin_HEXAGON_V6_vroundhb_128B", + "llvm.hexagon.V6.vroundhub" => "__builtin_HEXAGON_V6_vroundhub", + "llvm.hexagon.V6.vroundhub.128B" => "__builtin_HEXAGON_V6_vroundhub_128B", + "llvm.hexagon.V6.vrounduhub" => "__builtin_HEXAGON_V6_vrounduhub", + "llvm.hexagon.V6.vrounduhub.128B" => "__builtin_HEXAGON_V6_vrounduhub_128B", + "llvm.hexagon.V6.vrounduwuh" => "__builtin_HEXAGON_V6_vrounduwuh", + "llvm.hexagon.V6.vrounduwuh.128B" => "__builtin_HEXAGON_V6_vrounduwuh_128B", + "llvm.hexagon.V6.vroundwh" => "__builtin_HEXAGON_V6_vroundwh", + "llvm.hexagon.V6.vroundwh.128B" => "__builtin_HEXAGON_V6_vroundwh_128B", + "llvm.hexagon.V6.vroundwuh" => "__builtin_HEXAGON_V6_vroundwuh", + "llvm.hexagon.V6.vroundwuh.128B" => "__builtin_HEXAGON_V6_vroundwuh_128B", + "llvm.hexagon.V6.vrsadubi" => "__builtin_HEXAGON_V6_vrsadubi", + "llvm.hexagon.V6.vrsadubi.128B" => "__builtin_HEXAGON_V6_vrsadubi_128B", + "llvm.hexagon.V6.vrsadubi.acc" => "__builtin_HEXAGON_V6_vrsadubi_acc", + "llvm.hexagon.V6.vrsadubi.acc.128B" => "__builtin_HEXAGON_V6_vrsadubi_acc_128B", + "llvm.hexagon.V6.vsatdw" => "__builtin_HEXAGON_V6_vsatdw", + "llvm.hexagon.V6.vsatdw.128B" => "__builtin_HEXAGON_V6_vsatdw_128B", + "llvm.hexagon.V6.vsathub" => "__builtin_HEXAGON_V6_vsathub", + "llvm.hexagon.V6.vsathub.128B" => "__builtin_HEXAGON_V6_vsathub_128B", + "llvm.hexagon.V6.vsatuwuh" => "__builtin_HEXAGON_V6_vsatuwuh", + "llvm.hexagon.V6.vsatuwuh.128B" => "__builtin_HEXAGON_V6_vsatuwuh_128B", + "llvm.hexagon.V6.vsatwh" => "__builtin_HEXAGON_V6_vsatwh", + "llvm.hexagon.V6.vsatwh.128B" => "__builtin_HEXAGON_V6_vsatwh_128B", + "llvm.hexagon.V6.vsb" => "__builtin_HEXAGON_V6_vsb", + "llvm.hexagon.V6.vsb.128B" => "__builtin_HEXAGON_V6_vsb_128B", + "llvm.hexagon.V6.vscattermh" => "__builtin_HEXAGON_V6_vscattermh", + "llvm.hexagon.V6.vscattermh.128B" => "__builtin_HEXAGON_V6_vscattermh_128B", + "llvm.hexagon.V6.vscattermh.add" => "__builtin_HEXAGON_V6_vscattermh_add", + "llvm.hexagon.V6.vscattermh.add.128B" => "__builtin_HEXAGON_V6_vscattermh_add_128B", + "llvm.hexagon.V6.vscattermhq" => "__builtin_HEXAGON_V6_vscattermhq", + "llvm.hexagon.V6.vscattermhq.128B" => "__builtin_HEXAGON_V6_vscattermhq_128B", + "llvm.hexagon.V6.vscattermhw" => "__builtin_HEXAGON_V6_vscattermhw", + "llvm.hexagon.V6.vscattermhw.128B" => "__builtin_HEXAGON_V6_vscattermhw_128B", + "llvm.hexagon.V6.vscattermhw.add" => "__builtin_HEXAGON_V6_vscattermhw_add", + "llvm.hexagon.V6.vscattermhw.add.128B" => "__builtin_HEXAGON_V6_vscattermhw_add_128B", + "llvm.hexagon.V6.vscattermhwq" => "__builtin_HEXAGON_V6_vscattermhwq", + "llvm.hexagon.V6.vscattermhwq.128B" => "__builtin_HEXAGON_V6_vscattermhwq_128B", + "llvm.hexagon.V6.vscattermw" => "__builtin_HEXAGON_V6_vscattermw", + "llvm.hexagon.V6.vscattermw.128B" => "__builtin_HEXAGON_V6_vscattermw_128B", + "llvm.hexagon.V6.vscattermw.add" => "__builtin_HEXAGON_V6_vscattermw_add", + "llvm.hexagon.V6.vscattermw.add.128B" => "__builtin_HEXAGON_V6_vscattermw_add_128B", + "llvm.hexagon.V6.vscattermwq" => "__builtin_HEXAGON_V6_vscattermwq", + "llvm.hexagon.V6.vscattermwq.128B" => 
"__builtin_HEXAGON_V6_vscattermwq_128B", + "llvm.hexagon.V6.vsh" => "__builtin_HEXAGON_V6_vsh", + "llvm.hexagon.V6.vsh.128B" => "__builtin_HEXAGON_V6_vsh_128B", + "llvm.hexagon.V6.vshufeh" => "__builtin_HEXAGON_V6_vshufeh", + "llvm.hexagon.V6.vshufeh.128B" => "__builtin_HEXAGON_V6_vshufeh_128B", + "llvm.hexagon.V6.vshuffb" => "__builtin_HEXAGON_V6_vshuffb", + "llvm.hexagon.V6.vshuffb.128B" => "__builtin_HEXAGON_V6_vshuffb_128B", + "llvm.hexagon.V6.vshuffeb" => "__builtin_HEXAGON_V6_vshuffeb", + "llvm.hexagon.V6.vshuffeb.128B" => "__builtin_HEXAGON_V6_vshuffeb_128B", + "llvm.hexagon.V6.vshuffh" => "__builtin_HEXAGON_V6_vshuffh", + "llvm.hexagon.V6.vshuffh.128B" => "__builtin_HEXAGON_V6_vshuffh_128B", + "llvm.hexagon.V6.vshuffob" => "__builtin_HEXAGON_V6_vshuffob", + "llvm.hexagon.V6.vshuffob.128B" => "__builtin_HEXAGON_V6_vshuffob_128B", + "llvm.hexagon.V6.vshuffvdd" => "__builtin_HEXAGON_V6_vshuffvdd", + "llvm.hexagon.V6.vshuffvdd.128B" => "__builtin_HEXAGON_V6_vshuffvdd_128B", + "llvm.hexagon.V6.vshufoeb" => "__builtin_HEXAGON_V6_vshufoeb", + "llvm.hexagon.V6.vshufoeb.128B" => "__builtin_HEXAGON_V6_vshufoeb_128B", + "llvm.hexagon.V6.vshufoeh" => "__builtin_HEXAGON_V6_vshufoeh", + "llvm.hexagon.V6.vshufoeh.128B" => "__builtin_HEXAGON_V6_vshufoeh_128B", + "llvm.hexagon.V6.vshufoh" => "__builtin_HEXAGON_V6_vshufoh", + "llvm.hexagon.V6.vshufoh.128B" => "__builtin_HEXAGON_V6_vshufoh_128B", + "llvm.hexagon.V6.vsub.hf" => "__builtin_HEXAGON_V6_vsub_hf", + "llvm.hexagon.V6.vsub.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_128B", + "llvm.hexagon.V6.vsub.hf.hf" => "__builtin_HEXAGON_V6_vsub_hf_hf", + "llvm.hexagon.V6.vsub.hf.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_hf_128B", + "llvm.hexagon.V6.vsub.qf16" => "__builtin_HEXAGON_V6_vsub_qf16", + "llvm.hexagon.V6.vsub.qf16.128B" => "__builtin_HEXAGON_V6_vsub_qf16_128B", + "llvm.hexagon.V6.vsub.qf16.mix" => "__builtin_HEXAGON_V6_vsub_qf16_mix", + "llvm.hexagon.V6.vsub.qf16.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf16_mix_128B", + "llvm.hexagon.V6.vsub.qf32" => "__builtin_HEXAGON_V6_vsub_qf32", + "llvm.hexagon.V6.vsub.qf32.128B" => "__builtin_HEXAGON_V6_vsub_qf32_128B", + "llvm.hexagon.V6.vsub.qf32.mix" => "__builtin_HEXAGON_V6_vsub_qf32_mix", + "llvm.hexagon.V6.vsub.qf32.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf32_mix_128B", + "llvm.hexagon.V6.vsub.sf" => "__builtin_HEXAGON_V6_vsub_sf", + "llvm.hexagon.V6.vsub.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_128B", + "llvm.hexagon.V6.vsub.sf.bf" => "__builtin_HEXAGON_V6_vsub_sf_bf", + "llvm.hexagon.V6.vsub.sf.bf.128B" => "__builtin_HEXAGON_V6_vsub_sf_bf_128B", + "llvm.hexagon.V6.vsub.sf.hf" => "__builtin_HEXAGON_V6_vsub_sf_hf", + "llvm.hexagon.V6.vsub.sf.hf.128B" => "__builtin_HEXAGON_V6_vsub_sf_hf_128B", + "llvm.hexagon.V6.vsub.sf.sf" => "__builtin_HEXAGON_V6_vsub_sf_sf", + "llvm.hexagon.V6.vsub.sf.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_sf_128B", + "llvm.hexagon.V6.vsubb" => "__builtin_HEXAGON_V6_vsubb", + "llvm.hexagon.V6.vsubb.128B" => "__builtin_HEXAGON_V6_vsubb_128B", + "llvm.hexagon.V6.vsubb.dv" => "__builtin_HEXAGON_V6_vsubb_dv", + "llvm.hexagon.V6.vsubb.dv.128B" => "__builtin_HEXAGON_V6_vsubb_dv_128B", + "llvm.hexagon.V6.vsubbnq" => "__builtin_HEXAGON_V6_vsubbnq", + "llvm.hexagon.V6.vsubbnq.128B" => "__builtin_HEXAGON_V6_vsubbnq_128B", + "llvm.hexagon.V6.vsubbq" => "__builtin_HEXAGON_V6_vsubbq", + "llvm.hexagon.V6.vsubbq.128B" => "__builtin_HEXAGON_V6_vsubbq_128B", + "llvm.hexagon.V6.vsubbsat" => "__builtin_HEXAGON_V6_vsubbsat", + "llvm.hexagon.V6.vsubbsat.128B" => 
"__builtin_HEXAGON_V6_vsubbsat_128B", + "llvm.hexagon.V6.vsubbsat.dv" => "__builtin_HEXAGON_V6_vsubbsat_dv", + "llvm.hexagon.V6.vsubbsat.dv.128B" => "__builtin_HEXAGON_V6_vsubbsat_dv_128B", + "llvm.hexagon.V6.vsubh" => "__builtin_HEXAGON_V6_vsubh", + "llvm.hexagon.V6.vsubh.128B" => "__builtin_HEXAGON_V6_vsubh_128B", + "llvm.hexagon.V6.vsubh.dv" => "__builtin_HEXAGON_V6_vsubh_dv", + "llvm.hexagon.V6.vsubh.dv.128B" => "__builtin_HEXAGON_V6_vsubh_dv_128B", + "llvm.hexagon.V6.vsubhnq" => "__builtin_HEXAGON_V6_vsubhnq", + "llvm.hexagon.V6.vsubhnq.128B" => "__builtin_HEXAGON_V6_vsubhnq_128B", + "llvm.hexagon.V6.vsubhq" => "__builtin_HEXAGON_V6_vsubhq", + "llvm.hexagon.V6.vsubhq.128B" => "__builtin_HEXAGON_V6_vsubhq_128B", + "llvm.hexagon.V6.vsubhsat" => "__builtin_HEXAGON_V6_vsubhsat", + "llvm.hexagon.V6.vsubhsat.128B" => "__builtin_HEXAGON_V6_vsubhsat_128B", + "llvm.hexagon.V6.vsubhsat.dv" => "__builtin_HEXAGON_V6_vsubhsat_dv", + "llvm.hexagon.V6.vsubhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubhsat_dv_128B", + "llvm.hexagon.V6.vsubhw" => "__builtin_HEXAGON_V6_vsubhw", + "llvm.hexagon.V6.vsubhw.128B" => "__builtin_HEXAGON_V6_vsubhw_128B", + "llvm.hexagon.V6.vsububh" => "__builtin_HEXAGON_V6_vsububh", + "llvm.hexagon.V6.vsububh.128B" => "__builtin_HEXAGON_V6_vsububh_128B", + "llvm.hexagon.V6.vsububsat" => "__builtin_HEXAGON_V6_vsububsat", + "llvm.hexagon.V6.vsububsat.128B" => "__builtin_HEXAGON_V6_vsububsat_128B", + "llvm.hexagon.V6.vsububsat.dv" => "__builtin_HEXAGON_V6_vsububsat_dv", + "llvm.hexagon.V6.vsububsat.dv.128B" => "__builtin_HEXAGON_V6_vsububsat_dv_128B", + "llvm.hexagon.V6.vsubububb.sat" => "__builtin_HEXAGON_V6_vsubububb_sat", + "llvm.hexagon.V6.vsubububb.sat.128B" => "__builtin_HEXAGON_V6_vsubububb_sat_128B", + "llvm.hexagon.V6.vsubuhsat" => "__builtin_HEXAGON_V6_vsubuhsat", + "llvm.hexagon.V6.vsubuhsat.128B" => "__builtin_HEXAGON_V6_vsubuhsat_128B", + "llvm.hexagon.V6.vsubuhsat.dv" => "__builtin_HEXAGON_V6_vsubuhsat_dv", + "llvm.hexagon.V6.vsubuhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuhsat_dv_128B", + "llvm.hexagon.V6.vsubuhw" => "__builtin_HEXAGON_V6_vsubuhw", + "llvm.hexagon.V6.vsubuhw.128B" => "__builtin_HEXAGON_V6_vsubuhw_128B", + "llvm.hexagon.V6.vsubuwsat" => "__builtin_HEXAGON_V6_vsubuwsat", + "llvm.hexagon.V6.vsubuwsat.128B" => "__builtin_HEXAGON_V6_vsubuwsat_128B", + "llvm.hexagon.V6.vsubuwsat.dv" => "__builtin_HEXAGON_V6_vsubuwsat_dv", + "llvm.hexagon.V6.vsubuwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuwsat_dv_128B", + "llvm.hexagon.V6.vsubw" => "__builtin_HEXAGON_V6_vsubw", + "llvm.hexagon.V6.vsubw.128B" => "__builtin_HEXAGON_V6_vsubw_128B", + "llvm.hexagon.V6.vsubw.dv" => "__builtin_HEXAGON_V6_vsubw_dv", + "llvm.hexagon.V6.vsubw.dv.128B" => "__builtin_HEXAGON_V6_vsubw_dv_128B", + "llvm.hexagon.V6.vsubwnq" => "__builtin_HEXAGON_V6_vsubwnq", + "llvm.hexagon.V6.vsubwnq.128B" => "__builtin_HEXAGON_V6_vsubwnq_128B", + "llvm.hexagon.V6.vsubwq" => "__builtin_HEXAGON_V6_vsubwq", + "llvm.hexagon.V6.vsubwq.128B" => "__builtin_HEXAGON_V6_vsubwq_128B", + "llvm.hexagon.V6.vsubwsat" => "__builtin_HEXAGON_V6_vsubwsat", + "llvm.hexagon.V6.vsubwsat.128B" => "__builtin_HEXAGON_V6_vsubwsat_128B", + "llvm.hexagon.V6.vsubwsat.dv" => "__builtin_HEXAGON_V6_vsubwsat_dv", + "llvm.hexagon.V6.vsubwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubwsat_dv_128B", + "llvm.hexagon.V6.vswap" => "__builtin_HEXAGON_V6_vswap", + "llvm.hexagon.V6.vswap.128B" => "__builtin_HEXAGON_V6_vswap_128B", + "llvm.hexagon.V6.vtmpyb" => "__builtin_HEXAGON_V6_vtmpyb", + "llvm.hexagon.V6.vtmpyb.128B" => 
"__builtin_HEXAGON_V6_vtmpyb_128B", + "llvm.hexagon.V6.vtmpyb.acc" => "__builtin_HEXAGON_V6_vtmpyb_acc", + "llvm.hexagon.V6.vtmpyb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyb_acc_128B", + "llvm.hexagon.V6.vtmpybus" => "__builtin_HEXAGON_V6_vtmpybus", + "llvm.hexagon.V6.vtmpybus.128B" => "__builtin_HEXAGON_V6_vtmpybus_128B", + "llvm.hexagon.V6.vtmpybus.acc" => "__builtin_HEXAGON_V6_vtmpybus_acc", + "llvm.hexagon.V6.vtmpybus.acc.128B" => "__builtin_HEXAGON_V6_vtmpybus_acc_128B", + "llvm.hexagon.V6.vtmpyhb" => "__builtin_HEXAGON_V6_vtmpyhb", + "llvm.hexagon.V6.vtmpyhb.128B" => "__builtin_HEXAGON_V6_vtmpyhb_128B", + "llvm.hexagon.V6.vtmpyhb.acc" => "__builtin_HEXAGON_V6_vtmpyhb_acc", + "llvm.hexagon.V6.vtmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyhb_acc_128B", + "llvm.hexagon.V6.vunpackb" => "__builtin_HEXAGON_V6_vunpackb", + "llvm.hexagon.V6.vunpackb.128B" => "__builtin_HEXAGON_V6_vunpackb_128B", + "llvm.hexagon.V6.vunpackh" => "__builtin_HEXAGON_V6_vunpackh", + "llvm.hexagon.V6.vunpackh.128B" => "__builtin_HEXAGON_V6_vunpackh_128B", + "llvm.hexagon.V6.vunpackob" => "__builtin_HEXAGON_V6_vunpackob", + "llvm.hexagon.V6.vunpackob.128B" => "__builtin_HEXAGON_V6_vunpackob_128B", + "llvm.hexagon.V6.vunpackoh" => "__builtin_HEXAGON_V6_vunpackoh", + "llvm.hexagon.V6.vunpackoh.128B" => "__builtin_HEXAGON_V6_vunpackoh_128B", + "llvm.hexagon.V6.vunpackub" => "__builtin_HEXAGON_V6_vunpackub", + "llvm.hexagon.V6.vunpackub.128B" => "__builtin_HEXAGON_V6_vunpackub_128B", + "llvm.hexagon.V6.vunpackuh" => "__builtin_HEXAGON_V6_vunpackuh", + "llvm.hexagon.V6.vunpackuh.128B" => "__builtin_HEXAGON_V6_vunpackuh_128B", + "llvm.hexagon.V6.vxor" => "__builtin_HEXAGON_V6_vxor", + "llvm.hexagon.V6.vxor.128B" => "__builtin_HEXAGON_V6_vxor_128B", + "llvm.hexagon.V6.vzb" => "__builtin_HEXAGON_V6_vzb", + "llvm.hexagon.V6.vzb.128B" => "__builtin_HEXAGON_V6_vzb_128B", + "llvm.hexagon.V6.vzh" => "__builtin_HEXAGON_V6_vzh", + "llvm.hexagon.V6.vzh.128B" => "__builtin_HEXAGON_V6_vzh_128B", + "llvm.hexagon.Y2.dccleana" => "__builtin_HEXAGON_Y2_dccleana", + "llvm.hexagon.Y2.dccleaninva" => "__builtin_HEXAGON_Y2_dccleaninva", + "llvm.hexagon.Y2.dcfetch" => "__builtin_HEXAGON_Y2_dcfetch", + "llvm.hexagon.Y2.dcinva" => "__builtin_HEXAGON_Y2_dcinva", + "llvm.hexagon.Y2.dczeroa" => "__builtin_HEXAGON_Y2_dczeroa", + "llvm.hexagon.Y4.l2fetch" => "__builtin_HEXAGON_Y4_l2fetch", + "llvm.hexagon.Y5.l2fetch" => "__builtin_HEXAGON_Y5_l2fetch", + "llvm.hexagon.Y6.dmlink" => "__builtin_HEXAGON_Y6_dmlink", + "llvm.hexagon.Y6.dmpause" => "__builtin_HEXAGON_Y6_dmpause", + "llvm.hexagon.Y6.dmpoll" => "__builtin_HEXAGON_Y6_dmpoll", + "llvm.hexagon.Y6.dmresume" => "__builtin_HEXAGON_Y6_dmresume", + "llvm.hexagon.Y6.dmstart" => "__builtin_HEXAGON_Y6_dmstart", + "llvm.hexagon.Y6.dmwait" => "__builtin_HEXAGON_Y6_dmwait", + "llvm.hexagon.brev.ldb" => "__builtin_brev_ldb", + "llvm.hexagon.brev.ldd" => "__builtin_brev_ldd", + "llvm.hexagon.brev.ldh" => "__builtin_brev_ldh", + "llvm.hexagon.brev.ldub" => "__builtin_brev_ldub", + "llvm.hexagon.brev.lduh" => "__builtin_brev_lduh", + "llvm.hexagon.brev.ldw" => "__builtin_brev_ldw", + "llvm.hexagon.brev.stb" => "__builtin_brev_stb", + "llvm.hexagon.brev.std" => "__builtin_brev_std", + "llvm.hexagon.brev.sth" => "__builtin_brev_sth", + "llvm.hexagon.brev.sthhi" => "__builtin_brev_sthhi", + "llvm.hexagon.brev.stw" => "__builtin_brev_stw", + "llvm.hexagon.circ.ldb" => "__builtin_circ_ldb", + "llvm.hexagon.circ.ldd" => "__builtin_circ_ldd", + "llvm.hexagon.circ.ldh" => "__builtin_circ_ldh", + 
"llvm.hexagon.circ.ldub" => "__builtin_circ_ldub", + "llvm.hexagon.circ.lduh" => "__builtin_circ_lduh", + "llvm.hexagon.circ.ldw" => "__builtin_circ_ldw", + "llvm.hexagon.circ.stb" => "__builtin_circ_stb", + "llvm.hexagon.circ.std" => "__builtin_circ_std", + "llvm.hexagon.circ.sth" => "__builtin_circ_sth", + "llvm.hexagon.circ.sthhi" => "__builtin_circ_sthhi", + "llvm.hexagon.circ.stw" => "__builtin_circ_stw", + "llvm.hexagon.mm256i.vaddw" => "__builtin__mm256i_vaddw", + "llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch", + "llvm.hexagon.vmemcpy" => "__builtin_hexagon_vmemcpy", + "llvm.hexagon.vmemset" => "__builtin_hexagon_vmemset", + // mips + "llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph", + "llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb", + "llvm.mips.absq.s.w" => "__builtin_mips_absq_s_w", + "llvm.mips.add.a.b" => "__builtin_msa_add_a_b", + "llvm.mips.add.a.d" => "__builtin_msa_add_a_d", + "llvm.mips.add.a.h" => "__builtin_msa_add_a_h", + "llvm.mips.add.a.w" => "__builtin_msa_add_a_w", + "llvm.mips.addq.ph" => "__builtin_mips_addq_ph", + "llvm.mips.addq.s.ph" => "__builtin_mips_addq_s_ph", + "llvm.mips.addq.s.w" => "__builtin_mips_addq_s_w", + "llvm.mips.addqh.ph" => "__builtin_mips_addqh_ph", + "llvm.mips.addqh.r.ph" => "__builtin_mips_addqh_r_ph", + "llvm.mips.addqh.r.w" => "__builtin_mips_addqh_r_w", + "llvm.mips.addqh.w" => "__builtin_mips_addqh_w", + "llvm.mips.adds.a.b" => "__builtin_msa_adds_a_b", + "llvm.mips.adds.a.d" => "__builtin_msa_adds_a_d", + "llvm.mips.adds.a.h" => "__builtin_msa_adds_a_h", + "llvm.mips.adds.a.w" => "__builtin_msa_adds_a_w", + "llvm.mips.adds.s.b" => "__builtin_msa_adds_s_b", + "llvm.mips.adds.s.d" => "__builtin_msa_adds_s_d", + "llvm.mips.adds.s.h" => "__builtin_msa_adds_s_h", + "llvm.mips.adds.s.w" => "__builtin_msa_adds_s_w", + "llvm.mips.adds.u.b" => "__builtin_msa_adds_u_b", + "llvm.mips.adds.u.d" => "__builtin_msa_adds_u_d", + "llvm.mips.adds.u.h" => "__builtin_msa_adds_u_h", + "llvm.mips.adds.u.w" => "__builtin_msa_adds_u_w", + "llvm.mips.addsc" => "__builtin_mips_addsc", + "llvm.mips.addu.ph" => "__builtin_mips_addu_ph", + "llvm.mips.addu.qb" => "__builtin_mips_addu_qb", + "llvm.mips.addu.s.ph" => "__builtin_mips_addu_s_ph", + "llvm.mips.addu.s.qb" => "__builtin_mips_addu_s_qb", + "llvm.mips.adduh.qb" => "__builtin_mips_adduh_qb", + "llvm.mips.adduh.r.qb" => "__builtin_mips_adduh_r_qb", + "llvm.mips.addv.b" => "__builtin_msa_addv_b", + "llvm.mips.addv.d" => "__builtin_msa_addv_d", + "llvm.mips.addv.h" => "__builtin_msa_addv_h", + "llvm.mips.addv.w" => "__builtin_msa_addv_w", + "llvm.mips.addvi.b" => "__builtin_msa_addvi_b", + "llvm.mips.addvi.d" => "__builtin_msa_addvi_d", + "llvm.mips.addvi.h" => "__builtin_msa_addvi_h", + "llvm.mips.addvi.w" => "__builtin_msa_addvi_w", + "llvm.mips.addwc" => "__builtin_mips_addwc", + "llvm.mips.and.v" => "__builtin_msa_and_v", + "llvm.mips.andi.b" => "__builtin_msa_andi_b", + "llvm.mips.append" => "__builtin_mips_append", + "llvm.mips.asub.s.b" => "__builtin_msa_asub_s_b", + "llvm.mips.asub.s.d" => "__builtin_msa_asub_s_d", + "llvm.mips.asub.s.h" => "__builtin_msa_asub_s_h", + "llvm.mips.asub.s.w" => "__builtin_msa_asub_s_w", + "llvm.mips.asub.u.b" => "__builtin_msa_asub_u_b", + "llvm.mips.asub.u.d" => "__builtin_msa_asub_u_d", + "llvm.mips.asub.u.h" => "__builtin_msa_asub_u_h", + "llvm.mips.asub.u.w" => "__builtin_msa_asub_u_w", + "llvm.mips.ave.s.b" => "__builtin_msa_ave_s_b", + "llvm.mips.ave.s.d" => "__builtin_msa_ave_s_d", + "llvm.mips.ave.s.h" => "__builtin_msa_ave_s_h", + 
"llvm.mips.ave.s.w" => "__builtin_msa_ave_s_w", + "llvm.mips.ave.u.b" => "__builtin_msa_ave_u_b", + "llvm.mips.ave.u.d" => "__builtin_msa_ave_u_d", + "llvm.mips.ave.u.h" => "__builtin_msa_ave_u_h", + "llvm.mips.ave.u.w" => "__builtin_msa_ave_u_w", + "llvm.mips.aver.s.b" => "__builtin_msa_aver_s_b", + "llvm.mips.aver.s.d" => "__builtin_msa_aver_s_d", + "llvm.mips.aver.s.h" => "__builtin_msa_aver_s_h", + "llvm.mips.aver.s.w" => "__builtin_msa_aver_s_w", + "llvm.mips.aver.u.b" => "__builtin_msa_aver_u_b", + "llvm.mips.aver.u.d" => "__builtin_msa_aver_u_d", + "llvm.mips.aver.u.h" => "__builtin_msa_aver_u_h", + "llvm.mips.aver.u.w" => "__builtin_msa_aver_u_w", + "llvm.mips.balign" => "__builtin_mips_balign", + "llvm.mips.bclr.b" => "__builtin_msa_bclr_b", + "llvm.mips.bclr.d" => "__builtin_msa_bclr_d", + "llvm.mips.bclr.h" => "__builtin_msa_bclr_h", + "llvm.mips.bclr.w" => "__builtin_msa_bclr_w", + "llvm.mips.bclri.b" => "__builtin_msa_bclri_b", + "llvm.mips.bclri.d" => "__builtin_msa_bclri_d", + "llvm.mips.bclri.h" => "__builtin_msa_bclri_h", + "llvm.mips.bclri.w" => "__builtin_msa_bclri_w", + "llvm.mips.binsl.b" => "__builtin_msa_binsl_b", + "llvm.mips.binsl.d" => "__builtin_msa_binsl_d", + "llvm.mips.binsl.h" => "__builtin_msa_binsl_h", + "llvm.mips.binsl.w" => "__builtin_msa_binsl_w", + "llvm.mips.binsli.b" => "__builtin_msa_binsli_b", + "llvm.mips.binsli.d" => "__builtin_msa_binsli_d", + "llvm.mips.binsli.h" => "__builtin_msa_binsli_h", + "llvm.mips.binsli.w" => "__builtin_msa_binsli_w", + "llvm.mips.binsr.b" => "__builtin_msa_binsr_b", + "llvm.mips.binsr.d" => "__builtin_msa_binsr_d", + "llvm.mips.binsr.h" => "__builtin_msa_binsr_h", + "llvm.mips.binsr.w" => "__builtin_msa_binsr_w", + "llvm.mips.binsri.b" => "__builtin_msa_binsri_b", + "llvm.mips.binsri.d" => "__builtin_msa_binsri_d", + "llvm.mips.binsri.h" => "__builtin_msa_binsri_h", + "llvm.mips.binsri.w" => "__builtin_msa_binsri_w", + "llvm.mips.bitrev" => "__builtin_mips_bitrev", + "llvm.mips.bmnz.v" => "__builtin_msa_bmnz_v", + "llvm.mips.bmnzi.b" => "__builtin_msa_bmnzi_b", + "llvm.mips.bmz.v" => "__builtin_msa_bmz_v", + "llvm.mips.bmzi.b" => "__builtin_msa_bmzi_b", + "llvm.mips.bneg.b" => "__builtin_msa_bneg_b", + "llvm.mips.bneg.d" => "__builtin_msa_bneg_d", + "llvm.mips.bneg.h" => "__builtin_msa_bneg_h", + "llvm.mips.bneg.w" => "__builtin_msa_bneg_w", + "llvm.mips.bnegi.b" => "__builtin_msa_bnegi_b", + "llvm.mips.bnegi.d" => "__builtin_msa_bnegi_d", + "llvm.mips.bnegi.h" => "__builtin_msa_bnegi_h", + "llvm.mips.bnegi.w" => "__builtin_msa_bnegi_w", + "llvm.mips.bnz.b" => "__builtin_msa_bnz_b", + "llvm.mips.bnz.d" => "__builtin_msa_bnz_d", + "llvm.mips.bnz.h" => "__builtin_msa_bnz_h", + "llvm.mips.bnz.v" => "__builtin_msa_bnz_v", + "llvm.mips.bnz.w" => "__builtin_msa_bnz_w", + "llvm.mips.bposge32" => "__builtin_mips_bposge32", + "llvm.mips.bsel.v" => "__builtin_msa_bsel_v", + "llvm.mips.bseli.b" => "__builtin_msa_bseli_b", + "llvm.mips.bset.b" => "__builtin_msa_bset_b", + "llvm.mips.bset.d" => "__builtin_msa_bset_d", + "llvm.mips.bset.h" => "__builtin_msa_bset_h", + "llvm.mips.bset.w" => "__builtin_msa_bset_w", + "llvm.mips.bseti.b" => "__builtin_msa_bseti_b", + "llvm.mips.bseti.d" => "__builtin_msa_bseti_d", + "llvm.mips.bseti.h" => "__builtin_msa_bseti_h", + "llvm.mips.bseti.w" => "__builtin_msa_bseti_w", + "llvm.mips.bz.b" => "__builtin_msa_bz_b", + "llvm.mips.bz.d" => "__builtin_msa_bz_d", + "llvm.mips.bz.h" => "__builtin_msa_bz_h", + "llvm.mips.bz.v" => "__builtin_msa_bz_v", + "llvm.mips.bz.w" => "__builtin_msa_bz_w", + 
"llvm.mips.ceq.b" => "__builtin_msa_ceq_b", + "llvm.mips.ceq.d" => "__builtin_msa_ceq_d", + "llvm.mips.ceq.h" => "__builtin_msa_ceq_h", + "llvm.mips.ceq.w" => "__builtin_msa_ceq_w", + "llvm.mips.ceqi.b" => "__builtin_msa_ceqi_b", + "llvm.mips.ceqi.d" => "__builtin_msa_ceqi_d", + "llvm.mips.ceqi.h" => "__builtin_msa_ceqi_h", + "llvm.mips.ceqi.w" => "__builtin_msa_ceqi_w", + "llvm.mips.cfcmsa" => "__builtin_msa_cfcmsa", + "llvm.mips.cle.s.b" => "__builtin_msa_cle_s_b", + "llvm.mips.cle.s.d" => "__builtin_msa_cle_s_d", + "llvm.mips.cle.s.h" => "__builtin_msa_cle_s_h", + "llvm.mips.cle.s.w" => "__builtin_msa_cle_s_w", + "llvm.mips.cle.u.b" => "__builtin_msa_cle_u_b", + "llvm.mips.cle.u.d" => "__builtin_msa_cle_u_d", + "llvm.mips.cle.u.h" => "__builtin_msa_cle_u_h", + "llvm.mips.cle.u.w" => "__builtin_msa_cle_u_w", + "llvm.mips.clei.s.b" => "__builtin_msa_clei_s_b", + "llvm.mips.clei.s.d" => "__builtin_msa_clei_s_d", + "llvm.mips.clei.s.h" => "__builtin_msa_clei_s_h", + "llvm.mips.clei.s.w" => "__builtin_msa_clei_s_w", + "llvm.mips.clei.u.b" => "__builtin_msa_clei_u_b", + "llvm.mips.clei.u.d" => "__builtin_msa_clei_u_d", + "llvm.mips.clei.u.h" => "__builtin_msa_clei_u_h", + "llvm.mips.clei.u.w" => "__builtin_msa_clei_u_w", + "llvm.mips.clt.s.b" => "__builtin_msa_clt_s_b", + "llvm.mips.clt.s.d" => "__builtin_msa_clt_s_d", + "llvm.mips.clt.s.h" => "__builtin_msa_clt_s_h", + "llvm.mips.clt.s.w" => "__builtin_msa_clt_s_w", + "llvm.mips.clt.u.b" => "__builtin_msa_clt_u_b", + "llvm.mips.clt.u.d" => "__builtin_msa_clt_u_d", + "llvm.mips.clt.u.h" => "__builtin_msa_clt_u_h", + "llvm.mips.clt.u.w" => "__builtin_msa_clt_u_w", + "llvm.mips.clti.s.b" => "__builtin_msa_clti_s_b", + "llvm.mips.clti.s.d" => "__builtin_msa_clti_s_d", + "llvm.mips.clti.s.h" => "__builtin_msa_clti_s_h", + "llvm.mips.clti.s.w" => "__builtin_msa_clti_s_w", + "llvm.mips.clti.u.b" => "__builtin_msa_clti_u_b", + "llvm.mips.clti.u.d" => "__builtin_msa_clti_u_d", + "llvm.mips.clti.u.h" => "__builtin_msa_clti_u_h", + "llvm.mips.clti.u.w" => "__builtin_msa_clti_u_w", + "llvm.mips.cmp.eq.ph" => "__builtin_mips_cmp_eq_ph", + "llvm.mips.cmp.le.ph" => "__builtin_mips_cmp_le_ph", + "llvm.mips.cmp.lt.ph" => "__builtin_mips_cmp_lt_ph", + "llvm.mips.cmpgdu.eq.qb" => "__builtin_mips_cmpgdu_eq_qb", + "llvm.mips.cmpgdu.le.qb" => "__builtin_mips_cmpgdu_le_qb", + "llvm.mips.cmpgdu.lt.qb" => "__builtin_mips_cmpgdu_lt_qb", + "llvm.mips.cmpgu.eq.qb" => "__builtin_mips_cmpgu_eq_qb", + "llvm.mips.cmpgu.le.qb" => "__builtin_mips_cmpgu_le_qb", + "llvm.mips.cmpgu.lt.qb" => "__builtin_mips_cmpgu_lt_qb", + "llvm.mips.cmpu.eq.qb" => "__builtin_mips_cmpu_eq_qb", + "llvm.mips.cmpu.le.qb" => "__builtin_mips_cmpu_le_qb", + "llvm.mips.cmpu.lt.qb" => "__builtin_mips_cmpu_lt_qb", + "llvm.mips.copy.s.b" => "__builtin_msa_copy_s_b", + "llvm.mips.copy.s.d" => "__builtin_msa_copy_s_d", + "llvm.mips.copy.s.h" => "__builtin_msa_copy_s_h", + "llvm.mips.copy.s.w" => "__builtin_msa_copy_s_w", + "llvm.mips.copy.u.b" => "__builtin_msa_copy_u_b", + "llvm.mips.copy.u.d" => "__builtin_msa_copy_u_d", + "llvm.mips.copy.u.h" => "__builtin_msa_copy_u_h", + "llvm.mips.copy.u.w" => "__builtin_msa_copy_u_w", + "llvm.mips.ctcmsa" => "__builtin_msa_ctcmsa", + "llvm.mips.div.s.b" => "__builtin_msa_div_s_b", + "llvm.mips.div.s.d" => "__builtin_msa_div_s_d", + "llvm.mips.div.s.h" => "__builtin_msa_div_s_h", + "llvm.mips.div.s.w" => "__builtin_msa_div_s_w", + "llvm.mips.div.u.b" => "__builtin_msa_div_u_b", + "llvm.mips.div.u.d" => "__builtin_msa_div_u_d", + "llvm.mips.div.u.h" => 
"__builtin_msa_div_u_h", + "llvm.mips.div.u.w" => "__builtin_msa_div_u_w", + "llvm.mips.dlsa" => "__builtin_mips_dlsa", + "llvm.mips.dotp.s.d" => "__builtin_msa_dotp_s_d", + "llvm.mips.dotp.s.h" => "__builtin_msa_dotp_s_h", + "llvm.mips.dotp.s.w" => "__builtin_msa_dotp_s_w", + "llvm.mips.dotp.u.d" => "__builtin_msa_dotp_u_d", + "llvm.mips.dotp.u.h" => "__builtin_msa_dotp_u_h", + "llvm.mips.dotp.u.w" => "__builtin_msa_dotp_u_w", + "llvm.mips.dpa.w.ph" => "__builtin_mips_dpa_w_ph", + "llvm.mips.dpadd.s.d" => "__builtin_msa_dpadd_s_d", + "llvm.mips.dpadd.s.h" => "__builtin_msa_dpadd_s_h", + "llvm.mips.dpadd.s.w" => "__builtin_msa_dpadd_s_w", + "llvm.mips.dpadd.u.d" => "__builtin_msa_dpadd_u_d", + "llvm.mips.dpadd.u.h" => "__builtin_msa_dpadd_u_h", + "llvm.mips.dpadd.u.w" => "__builtin_msa_dpadd_u_w", + "llvm.mips.dpaq.s.w.ph" => "__builtin_mips_dpaq_s_w_ph", + "llvm.mips.dpaq.sa.l.w" => "__builtin_mips_dpaq_sa_l_w", + "llvm.mips.dpaqx.s.w.ph" => "__builtin_mips_dpaqx_s_w_ph", + "llvm.mips.dpaqx.sa.w.ph" => "__builtin_mips_dpaqx_sa_w_ph", + "llvm.mips.dpau.h.qbl" => "__builtin_mips_dpau_h_qbl", + "llvm.mips.dpau.h.qbr" => "__builtin_mips_dpau_h_qbr", + "llvm.mips.dpax.w.ph" => "__builtin_mips_dpax_w_ph", + "llvm.mips.dps.w.ph" => "__builtin_mips_dps_w_ph", + "llvm.mips.dpsq.s.w.ph" => "__builtin_mips_dpsq_s_w_ph", + "llvm.mips.dpsq.sa.l.w" => "__builtin_mips_dpsq_sa_l_w", + "llvm.mips.dpsqx.s.w.ph" => "__builtin_mips_dpsqx_s_w_ph", + "llvm.mips.dpsqx.sa.w.ph" => "__builtin_mips_dpsqx_sa_w_ph", + "llvm.mips.dpsu.h.qbl" => "__builtin_mips_dpsu_h_qbl", + "llvm.mips.dpsu.h.qbr" => "__builtin_mips_dpsu_h_qbr", + "llvm.mips.dpsub.s.d" => "__builtin_msa_dpsub_s_d", + "llvm.mips.dpsub.s.h" => "__builtin_msa_dpsub_s_h", + "llvm.mips.dpsub.s.w" => "__builtin_msa_dpsub_s_w", + "llvm.mips.dpsub.u.d" => "__builtin_msa_dpsub_u_d", + "llvm.mips.dpsub.u.h" => "__builtin_msa_dpsub_u_h", + "llvm.mips.dpsub.u.w" => "__builtin_msa_dpsub_u_w", + "llvm.mips.dpsx.w.ph" => "__builtin_mips_dpsx_w_ph", + "llvm.mips.extp" => "__builtin_mips_extp", + "llvm.mips.extpdp" => "__builtin_mips_extpdp", + "llvm.mips.extr.r.w" => "__builtin_mips_extr_r_w", + "llvm.mips.extr.rs.w" => "__builtin_mips_extr_rs_w", + "llvm.mips.extr.s.h" => "__builtin_mips_extr_s_h", + "llvm.mips.extr.w" => "__builtin_mips_extr_w", + "llvm.mips.fadd.d" => "__builtin_msa_fadd_d", + "llvm.mips.fadd.w" => "__builtin_msa_fadd_w", + "llvm.mips.fcaf.d" => "__builtin_msa_fcaf_d", + "llvm.mips.fcaf.w" => "__builtin_msa_fcaf_w", + "llvm.mips.fceq.d" => "__builtin_msa_fceq_d", + "llvm.mips.fceq.w" => "__builtin_msa_fceq_w", + "llvm.mips.fclass.d" => "__builtin_msa_fclass_d", + "llvm.mips.fclass.w" => "__builtin_msa_fclass_w", + "llvm.mips.fcle.d" => "__builtin_msa_fcle_d", + "llvm.mips.fcle.w" => "__builtin_msa_fcle_w", + "llvm.mips.fclt.d" => "__builtin_msa_fclt_d", + "llvm.mips.fclt.w" => "__builtin_msa_fclt_w", + "llvm.mips.fcne.d" => "__builtin_msa_fcne_d", + "llvm.mips.fcne.w" => "__builtin_msa_fcne_w", + "llvm.mips.fcor.d" => "__builtin_msa_fcor_d", + "llvm.mips.fcor.w" => "__builtin_msa_fcor_w", + "llvm.mips.fcueq.d" => "__builtin_msa_fcueq_d", + "llvm.mips.fcueq.w" => "__builtin_msa_fcueq_w", + "llvm.mips.fcule.d" => "__builtin_msa_fcule_d", + "llvm.mips.fcule.w" => "__builtin_msa_fcule_w", + "llvm.mips.fcult.d" => "__builtin_msa_fcult_d", + "llvm.mips.fcult.w" => "__builtin_msa_fcult_w", + "llvm.mips.fcun.d" => "__builtin_msa_fcun_d", + "llvm.mips.fcun.w" => "__builtin_msa_fcun_w", + "llvm.mips.fcune.d" => "__builtin_msa_fcune_d", + 
"llvm.mips.fcune.w" => "__builtin_msa_fcune_w", + "llvm.mips.fdiv.d" => "__builtin_msa_fdiv_d", + "llvm.mips.fdiv.w" => "__builtin_msa_fdiv_w", + "llvm.mips.fexdo.h" => "__builtin_msa_fexdo_h", + "llvm.mips.fexdo.w" => "__builtin_msa_fexdo_w", + "llvm.mips.fexp2.d" => "__builtin_msa_fexp2_d", + "llvm.mips.fexp2.w" => "__builtin_msa_fexp2_w", + "llvm.mips.fexupl.d" => "__builtin_msa_fexupl_d", + "llvm.mips.fexupl.w" => "__builtin_msa_fexupl_w", + "llvm.mips.fexupr.d" => "__builtin_msa_fexupr_d", + "llvm.mips.fexupr.w" => "__builtin_msa_fexupr_w", + "llvm.mips.ffint.s.d" => "__builtin_msa_ffint_s_d", + "llvm.mips.ffint.s.w" => "__builtin_msa_ffint_s_w", + "llvm.mips.ffint.u.d" => "__builtin_msa_ffint_u_d", + "llvm.mips.ffint.u.w" => "__builtin_msa_ffint_u_w", + "llvm.mips.ffql.d" => "__builtin_msa_ffql_d", + "llvm.mips.ffql.w" => "__builtin_msa_ffql_w", + "llvm.mips.ffqr.d" => "__builtin_msa_ffqr_d", + "llvm.mips.ffqr.w" => "__builtin_msa_ffqr_w", + "llvm.mips.fill.b" => "__builtin_msa_fill_b", + "llvm.mips.fill.d" => "__builtin_msa_fill_d", + "llvm.mips.fill.h" => "__builtin_msa_fill_h", + "llvm.mips.fill.w" => "__builtin_msa_fill_w", + "llvm.mips.flog2.d" => "__builtin_msa_flog2_d", + "llvm.mips.flog2.w" => "__builtin_msa_flog2_w", + "llvm.mips.fmadd.d" => "__builtin_msa_fmadd_d", + "llvm.mips.fmadd.w" => "__builtin_msa_fmadd_w", + "llvm.mips.fmax.a.d" => "__builtin_msa_fmax_a_d", + "llvm.mips.fmax.a.w" => "__builtin_msa_fmax_a_w", + "llvm.mips.fmax.d" => "__builtin_msa_fmax_d", + "llvm.mips.fmax.w" => "__builtin_msa_fmax_w", + "llvm.mips.fmin.a.d" => "__builtin_msa_fmin_a_d", + "llvm.mips.fmin.a.w" => "__builtin_msa_fmin_a_w", + "llvm.mips.fmin.d" => "__builtin_msa_fmin_d", + "llvm.mips.fmin.w" => "__builtin_msa_fmin_w", + "llvm.mips.fmsub.d" => "__builtin_msa_fmsub_d", + "llvm.mips.fmsub.w" => "__builtin_msa_fmsub_w", + "llvm.mips.fmul.d" => "__builtin_msa_fmul_d", + "llvm.mips.fmul.w" => "__builtin_msa_fmul_w", + "llvm.mips.frcp.d" => "__builtin_msa_frcp_d", + "llvm.mips.frcp.w" => "__builtin_msa_frcp_w", + "llvm.mips.frint.d" => "__builtin_msa_frint_d", + "llvm.mips.frint.w" => "__builtin_msa_frint_w", + "llvm.mips.frsqrt.d" => "__builtin_msa_frsqrt_d", + "llvm.mips.frsqrt.w" => "__builtin_msa_frsqrt_w", + "llvm.mips.fsaf.d" => "__builtin_msa_fsaf_d", + "llvm.mips.fsaf.w" => "__builtin_msa_fsaf_w", + "llvm.mips.fseq.d" => "__builtin_msa_fseq_d", + "llvm.mips.fseq.w" => "__builtin_msa_fseq_w", + "llvm.mips.fsle.d" => "__builtin_msa_fsle_d", + "llvm.mips.fsle.w" => "__builtin_msa_fsle_w", + "llvm.mips.fslt.d" => "__builtin_msa_fslt_d", + "llvm.mips.fslt.w" => "__builtin_msa_fslt_w", + "llvm.mips.fsne.d" => "__builtin_msa_fsne_d", + "llvm.mips.fsne.w" => "__builtin_msa_fsne_w", + "llvm.mips.fsor.d" => "__builtin_msa_fsor_d", + "llvm.mips.fsor.w" => "__builtin_msa_fsor_w", + "llvm.mips.fsqrt.d" => "__builtin_msa_fsqrt_d", + "llvm.mips.fsqrt.w" => "__builtin_msa_fsqrt_w", + "llvm.mips.fsub.d" => "__builtin_msa_fsub_d", + "llvm.mips.fsub.w" => "__builtin_msa_fsub_w", + "llvm.mips.fsueq.d" => "__builtin_msa_fsueq_d", + "llvm.mips.fsueq.w" => "__builtin_msa_fsueq_w", + "llvm.mips.fsule.d" => "__builtin_msa_fsule_d", + "llvm.mips.fsule.w" => "__builtin_msa_fsule_w", + "llvm.mips.fsult.d" => "__builtin_msa_fsult_d", + "llvm.mips.fsult.w" => "__builtin_msa_fsult_w", + "llvm.mips.fsun.d" => "__builtin_msa_fsun_d", + "llvm.mips.fsun.w" => "__builtin_msa_fsun_w", + "llvm.mips.fsune.d" => "__builtin_msa_fsune_d", + "llvm.mips.fsune.w" => "__builtin_msa_fsune_w", + "llvm.mips.ftint.s.d" => 
"__builtin_msa_ftint_s_d", + "llvm.mips.ftint.s.w" => "__builtin_msa_ftint_s_w", + "llvm.mips.ftint.u.d" => "__builtin_msa_ftint_u_d", + "llvm.mips.ftint.u.w" => "__builtin_msa_ftint_u_w", + "llvm.mips.ftq.h" => "__builtin_msa_ftq_h", + "llvm.mips.ftq.w" => "__builtin_msa_ftq_w", + "llvm.mips.ftrunc.s.d" => "__builtin_msa_ftrunc_s_d", + "llvm.mips.ftrunc.s.w" => "__builtin_msa_ftrunc_s_w", + "llvm.mips.ftrunc.u.d" => "__builtin_msa_ftrunc_u_d", + "llvm.mips.ftrunc.u.w" => "__builtin_msa_ftrunc_u_w", + "llvm.mips.hadd.s.d" => "__builtin_msa_hadd_s_d", + "llvm.mips.hadd.s.h" => "__builtin_msa_hadd_s_h", + "llvm.mips.hadd.s.w" => "__builtin_msa_hadd_s_w", + "llvm.mips.hadd.u.d" => "__builtin_msa_hadd_u_d", + "llvm.mips.hadd.u.h" => "__builtin_msa_hadd_u_h", + "llvm.mips.hadd.u.w" => "__builtin_msa_hadd_u_w", + "llvm.mips.hsub.s.d" => "__builtin_msa_hsub_s_d", + "llvm.mips.hsub.s.h" => "__builtin_msa_hsub_s_h", + "llvm.mips.hsub.s.w" => "__builtin_msa_hsub_s_w", + "llvm.mips.hsub.u.d" => "__builtin_msa_hsub_u_d", + "llvm.mips.hsub.u.h" => "__builtin_msa_hsub_u_h", + "llvm.mips.hsub.u.w" => "__builtin_msa_hsub_u_w", + "llvm.mips.ilvev.b" => "__builtin_msa_ilvev_b", + "llvm.mips.ilvev.d" => "__builtin_msa_ilvev_d", + "llvm.mips.ilvev.h" => "__builtin_msa_ilvev_h", + "llvm.mips.ilvev.w" => "__builtin_msa_ilvev_w", + "llvm.mips.ilvl.b" => "__builtin_msa_ilvl_b", + "llvm.mips.ilvl.d" => "__builtin_msa_ilvl_d", + "llvm.mips.ilvl.h" => "__builtin_msa_ilvl_h", + "llvm.mips.ilvl.w" => "__builtin_msa_ilvl_w", + "llvm.mips.ilvod.b" => "__builtin_msa_ilvod_b", + "llvm.mips.ilvod.d" => "__builtin_msa_ilvod_d", + "llvm.mips.ilvod.h" => "__builtin_msa_ilvod_h", + "llvm.mips.ilvod.w" => "__builtin_msa_ilvod_w", + "llvm.mips.ilvr.b" => "__builtin_msa_ilvr_b", + "llvm.mips.ilvr.d" => "__builtin_msa_ilvr_d", + "llvm.mips.ilvr.h" => "__builtin_msa_ilvr_h", + "llvm.mips.ilvr.w" => "__builtin_msa_ilvr_w", + "llvm.mips.insert.b" => "__builtin_msa_insert_b", + "llvm.mips.insert.d" => "__builtin_msa_insert_d", + "llvm.mips.insert.h" => "__builtin_msa_insert_h", + "llvm.mips.insert.w" => "__builtin_msa_insert_w", + "llvm.mips.insv" => "__builtin_mips_insv", + "llvm.mips.insve.b" => "__builtin_msa_insve_b", + "llvm.mips.insve.d" => "__builtin_msa_insve_d", + "llvm.mips.insve.h" => "__builtin_msa_insve_h", + "llvm.mips.insve.w" => "__builtin_msa_insve_w", + "llvm.mips.lbux" => "__builtin_mips_lbux", + "llvm.mips.ld.b" => "__builtin_msa_ld_b", + "llvm.mips.ld.d" => "__builtin_msa_ld_d", + "llvm.mips.ld.h" => "__builtin_msa_ld_h", + "llvm.mips.ld.w" => "__builtin_msa_ld_w", + "llvm.mips.ldi.b" => "__builtin_msa_ldi_b", + "llvm.mips.ldi.d" => "__builtin_msa_ldi_d", + "llvm.mips.ldi.h" => "__builtin_msa_ldi_h", + "llvm.mips.ldi.w" => "__builtin_msa_ldi_w", + "llvm.mips.ldr.d" => "__builtin_msa_ldr_d", + "llvm.mips.ldr.w" => "__builtin_msa_ldr_w", + "llvm.mips.lhx" => "__builtin_mips_lhx", + "llvm.mips.lsa" => "__builtin_mips_lsa", + "llvm.mips.lwx" => "__builtin_mips_lwx", + "llvm.mips.madd" => "__builtin_mips_madd", + "llvm.mips.madd.q.h" => "__builtin_msa_madd_q_h", + "llvm.mips.madd.q.w" => "__builtin_msa_madd_q_w", + "llvm.mips.maddr.q.h" => "__builtin_msa_maddr_q_h", + "llvm.mips.maddr.q.w" => "__builtin_msa_maddr_q_w", + "llvm.mips.maddu" => "__builtin_mips_maddu", + "llvm.mips.maddv.b" => "__builtin_msa_maddv_b", + "llvm.mips.maddv.d" => "__builtin_msa_maddv_d", + "llvm.mips.maddv.h" => "__builtin_msa_maddv_h", + "llvm.mips.maddv.w" => "__builtin_msa_maddv_w", + "llvm.mips.maq.s.w.phl" => "__builtin_mips_maq_s_w_phl", 
+ "llvm.mips.maq.s.w.phr" => "__builtin_mips_maq_s_w_phr", + "llvm.mips.maq.sa.w.phl" => "__builtin_mips_maq_sa_w_phl", + "llvm.mips.maq.sa.w.phr" => "__builtin_mips_maq_sa_w_phr", + "llvm.mips.max.a.b" => "__builtin_msa_max_a_b", + "llvm.mips.max.a.d" => "__builtin_msa_max_a_d", + "llvm.mips.max.a.h" => "__builtin_msa_max_a_h", + "llvm.mips.max.a.w" => "__builtin_msa_max_a_w", + "llvm.mips.max.s.b" => "__builtin_msa_max_s_b", + "llvm.mips.max.s.d" => "__builtin_msa_max_s_d", + "llvm.mips.max.s.h" => "__builtin_msa_max_s_h", + "llvm.mips.max.s.w" => "__builtin_msa_max_s_w", + "llvm.mips.max.u.b" => "__builtin_msa_max_u_b", + "llvm.mips.max.u.d" => "__builtin_msa_max_u_d", + "llvm.mips.max.u.h" => "__builtin_msa_max_u_h", + "llvm.mips.max.u.w" => "__builtin_msa_max_u_w", + "llvm.mips.maxi.s.b" => "__builtin_msa_maxi_s_b", + "llvm.mips.maxi.s.d" => "__builtin_msa_maxi_s_d", + "llvm.mips.maxi.s.h" => "__builtin_msa_maxi_s_h", + "llvm.mips.maxi.s.w" => "__builtin_msa_maxi_s_w", + "llvm.mips.maxi.u.b" => "__builtin_msa_maxi_u_b", + "llvm.mips.maxi.u.d" => "__builtin_msa_maxi_u_d", + "llvm.mips.maxi.u.h" => "__builtin_msa_maxi_u_h", + "llvm.mips.maxi.u.w" => "__builtin_msa_maxi_u_w", + "llvm.mips.min.a.b" => "__builtin_msa_min_a_b", + "llvm.mips.min.a.d" => "__builtin_msa_min_a_d", + "llvm.mips.min.a.h" => "__builtin_msa_min_a_h", + "llvm.mips.min.a.w" => "__builtin_msa_min_a_w", + "llvm.mips.min.s.b" => "__builtin_msa_min_s_b", + "llvm.mips.min.s.d" => "__builtin_msa_min_s_d", + "llvm.mips.min.s.h" => "__builtin_msa_min_s_h", + "llvm.mips.min.s.w" => "__builtin_msa_min_s_w", + "llvm.mips.min.u.b" => "__builtin_msa_min_u_b", + "llvm.mips.min.u.d" => "__builtin_msa_min_u_d", + "llvm.mips.min.u.h" => "__builtin_msa_min_u_h", + "llvm.mips.min.u.w" => "__builtin_msa_min_u_w", + "llvm.mips.mini.s.b" => "__builtin_msa_mini_s_b", + "llvm.mips.mini.s.d" => "__builtin_msa_mini_s_d", + "llvm.mips.mini.s.h" => "__builtin_msa_mini_s_h", + "llvm.mips.mini.s.w" => "__builtin_msa_mini_s_w", + "llvm.mips.mini.u.b" => "__builtin_msa_mini_u_b", + "llvm.mips.mini.u.d" => "__builtin_msa_mini_u_d", + "llvm.mips.mini.u.h" => "__builtin_msa_mini_u_h", + "llvm.mips.mini.u.w" => "__builtin_msa_mini_u_w", + "llvm.mips.mod.s.b" => "__builtin_msa_mod_s_b", + "llvm.mips.mod.s.d" => "__builtin_msa_mod_s_d", + "llvm.mips.mod.s.h" => "__builtin_msa_mod_s_h", + "llvm.mips.mod.s.w" => "__builtin_msa_mod_s_w", + "llvm.mips.mod.u.b" => "__builtin_msa_mod_u_b", + "llvm.mips.mod.u.d" => "__builtin_msa_mod_u_d", + "llvm.mips.mod.u.h" => "__builtin_msa_mod_u_h", + "llvm.mips.mod.u.w" => "__builtin_msa_mod_u_w", + "llvm.mips.modsub" => "__builtin_mips_modsub", + "llvm.mips.move.v" => "__builtin_msa_move_v", + "llvm.mips.msub" => "__builtin_mips_msub", + "llvm.mips.msub.q.h" => "__builtin_msa_msub_q_h", + "llvm.mips.msub.q.w" => "__builtin_msa_msub_q_w", + "llvm.mips.msubr.q.h" => "__builtin_msa_msubr_q_h", + "llvm.mips.msubr.q.w" => "__builtin_msa_msubr_q_w", + "llvm.mips.msubu" => "__builtin_mips_msubu", + "llvm.mips.msubv.b" => "__builtin_msa_msubv_b", + "llvm.mips.msubv.d" => "__builtin_msa_msubv_d", + "llvm.mips.msubv.h" => "__builtin_msa_msubv_h", + "llvm.mips.msubv.w" => "__builtin_msa_msubv_w", + "llvm.mips.mthlip" => "__builtin_mips_mthlip", + "llvm.mips.mul.ph" => "__builtin_mips_mul_ph", + "llvm.mips.mul.q.h" => "__builtin_msa_mul_q_h", + "llvm.mips.mul.q.w" => "__builtin_msa_mul_q_w", + "llvm.mips.mul.s.ph" => "__builtin_mips_mul_s_ph", + "llvm.mips.muleq.s.w.phl" => "__builtin_mips_muleq_s_w_phl", + 
"llvm.mips.muleq.s.w.phr" => "__builtin_mips_muleq_s_w_phr", + "llvm.mips.muleu.s.ph.qbl" => "__builtin_mips_muleu_s_ph_qbl", + "llvm.mips.muleu.s.ph.qbr" => "__builtin_mips_muleu_s_ph_qbr", + "llvm.mips.mulq.rs.ph" => "__builtin_mips_mulq_rs_ph", + "llvm.mips.mulq.rs.w" => "__builtin_mips_mulq_rs_w", + "llvm.mips.mulq.s.ph" => "__builtin_mips_mulq_s_ph", + "llvm.mips.mulq.s.w" => "__builtin_mips_mulq_s_w", + "llvm.mips.mulr.q.h" => "__builtin_msa_mulr_q_h", + "llvm.mips.mulr.q.w" => "__builtin_msa_mulr_q_w", + "llvm.mips.mulsa.w.ph" => "__builtin_mips_mulsa_w_ph", + "llvm.mips.mulsaq.s.w.ph" => "__builtin_mips_mulsaq_s_w_ph", + "llvm.mips.mult" => "__builtin_mips_mult", + "llvm.mips.multu" => "__builtin_mips_multu", + "llvm.mips.mulv.b" => "__builtin_msa_mulv_b", + "llvm.mips.mulv.d" => "__builtin_msa_mulv_d", + "llvm.mips.mulv.h" => "__builtin_msa_mulv_h", + "llvm.mips.mulv.w" => "__builtin_msa_mulv_w", + "llvm.mips.nloc.b" => "__builtin_msa_nloc_b", + "llvm.mips.nloc.d" => "__builtin_msa_nloc_d", + "llvm.mips.nloc.h" => "__builtin_msa_nloc_h", + "llvm.mips.nloc.w" => "__builtin_msa_nloc_w", + "llvm.mips.nlzc.b" => "__builtin_msa_nlzc_b", + "llvm.mips.nlzc.d" => "__builtin_msa_nlzc_d", + "llvm.mips.nlzc.h" => "__builtin_msa_nlzc_h", + "llvm.mips.nlzc.w" => "__builtin_msa_nlzc_w", + "llvm.mips.nor.v" => "__builtin_msa_nor_v", + "llvm.mips.nori.b" => "__builtin_msa_nori_b", + "llvm.mips.or.v" => "__builtin_msa_or_v", + "llvm.mips.ori.b" => "__builtin_msa_ori_b", + "llvm.mips.packrl.ph" => "__builtin_mips_packrl_ph", + "llvm.mips.pckev.b" => "__builtin_msa_pckev_b", + "llvm.mips.pckev.d" => "__builtin_msa_pckev_d", + "llvm.mips.pckev.h" => "__builtin_msa_pckev_h", + "llvm.mips.pckev.w" => "__builtin_msa_pckev_w", + "llvm.mips.pckod.b" => "__builtin_msa_pckod_b", + "llvm.mips.pckod.d" => "__builtin_msa_pckod_d", + "llvm.mips.pckod.h" => "__builtin_msa_pckod_h", + "llvm.mips.pckod.w" => "__builtin_msa_pckod_w", + "llvm.mips.pcnt.b" => "__builtin_msa_pcnt_b", + "llvm.mips.pcnt.d" => "__builtin_msa_pcnt_d", + "llvm.mips.pcnt.h" => "__builtin_msa_pcnt_h", + "llvm.mips.pcnt.w" => "__builtin_msa_pcnt_w", + "llvm.mips.pick.ph" => "__builtin_mips_pick_ph", + "llvm.mips.pick.qb" => "__builtin_mips_pick_qb", + "llvm.mips.preceq.w.phl" => "__builtin_mips_preceq_w_phl", + "llvm.mips.preceq.w.phr" => "__builtin_mips_preceq_w_phr", + "llvm.mips.precequ.ph.qbl" => "__builtin_mips_precequ_ph_qbl", + "llvm.mips.precequ.ph.qbla" => "__builtin_mips_precequ_ph_qbla", + "llvm.mips.precequ.ph.qbr" => "__builtin_mips_precequ_ph_qbr", + "llvm.mips.precequ.ph.qbra" => "__builtin_mips_precequ_ph_qbra", + "llvm.mips.preceu.ph.qbl" => "__builtin_mips_preceu_ph_qbl", + "llvm.mips.preceu.ph.qbla" => "__builtin_mips_preceu_ph_qbla", + "llvm.mips.preceu.ph.qbr" => "__builtin_mips_preceu_ph_qbr", + "llvm.mips.preceu.ph.qbra" => "__builtin_mips_preceu_ph_qbra", + "llvm.mips.precr.qb.ph" => "__builtin_mips_precr_qb_ph", + "llvm.mips.precr.sra.ph.w" => "__builtin_mips_precr_sra_ph_w", + "llvm.mips.precr.sra.r.ph.w" => "__builtin_mips_precr_sra_r_ph_w", + "llvm.mips.precrq.ph.w" => "__builtin_mips_precrq_ph_w", + "llvm.mips.precrq.qb.ph" => "__builtin_mips_precrq_qb_ph", + "llvm.mips.precrq.rs.ph.w" => "__builtin_mips_precrq_rs_ph_w", + "llvm.mips.precrqu.s.qb.ph" => "__builtin_mips_precrqu_s_qb_ph", + "llvm.mips.prepend" => "__builtin_mips_prepend", + "llvm.mips.raddu.w.qb" => "__builtin_mips_raddu_w_qb", + "llvm.mips.rddsp" => "__builtin_mips_rddsp", + "llvm.mips.repl.ph" => "__builtin_mips_repl_ph", + "llvm.mips.repl.qb" => 
"__builtin_mips_repl_qb", + "llvm.mips.sat.s.b" => "__builtin_msa_sat_s_b", + "llvm.mips.sat.s.d" => "__builtin_msa_sat_s_d", + "llvm.mips.sat.s.h" => "__builtin_msa_sat_s_h", + "llvm.mips.sat.s.w" => "__builtin_msa_sat_s_w", + "llvm.mips.sat.u.b" => "__builtin_msa_sat_u_b", + "llvm.mips.sat.u.d" => "__builtin_msa_sat_u_d", + "llvm.mips.sat.u.h" => "__builtin_msa_sat_u_h", + "llvm.mips.sat.u.w" => "__builtin_msa_sat_u_w", + "llvm.mips.shf.b" => "__builtin_msa_shf_b", + "llvm.mips.shf.h" => "__builtin_msa_shf_h", + "llvm.mips.shf.w" => "__builtin_msa_shf_w", + "llvm.mips.shilo" => "__builtin_mips_shilo", + "llvm.mips.shll.ph" => "__builtin_mips_shll_ph", + "llvm.mips.shll.qb" => "__builtin_mips_shll_qb", + "llvm.mips.shll.s.ph" => "__builtin_mips_shll_s_ph", + "llvm.mips.shll.s.w" => "__builtin_mips_shll_s_w", + "llvm.mips.shra.ph" => "__builtin_mips_shra_ph", + "llvm.mips.shra.qb" => "__builtin_mips_shra_qb", + "llvm.mips.shra.r.ph" => "__builtin_mips_shra_r_ph", + "llvm.mips.shra.r.qb" => "__builtin_mips_shra_r_qb", + "llvm.mips.shra.r.w" => "__builtin_mips_shra_r_w", + "llvm.mips.shrl.ph" => "__builtin_mips_shrl_ph", + "llvm.mips.shrl.qb" => "__builtin_mips_shrl_qb", + "llvm.mips.sld.b" => "__builtin_msa_sld_b", + "llvm.mips.sld.d" => "__builtin_msa_sld_d", + "llvm.mips.sld.h" => "__builtin_msa_sld_h", + "llvm.mips.sld.w" => "__builtin_msa_sld_w", + "llvm.mips.sldi.b" => "__builtin_msa_sldi_b", + "llvm.mips.sldi.d" => "__builtin_msa_sldi_d", + "llvm.mips.sldi.h" => "__builtin_msa_sldi_h", + "llvm.mips.sldi.w" => "__builtin_msa_sldi_w", + "llvm.mips.sll.b" => "__builtin_msa_sll_b", + "llvm.mips.sll.d" => "__builtin_msa_sll_d", + "llvm.mips.sll.h" => "__builtin_msa_sll_h", + "llvm.mips.sll.w" => "__builtin_msa_sll_w", + "llvm.mips.slli.b" => "__builtin_msa_slli_b", + "llvm.mips.slli.d" => "__builtin_msa_slli_d", + "llvm.mips.slli.h" => "__builtin_msa_slli_h", + "llvm.mips.slli.w" => "__builtin_msa_slli_w", + "llvm.mips.splat.b" => "__builtin_msa_splat_b", + "llvm.mips.splat.d" => "__builtin_msa_splat_d", + "llvm.mips.splat.h" => "__builtin_msa_splat_h", + "llvm.mips.splat.w" => "__builtin_msa_splat_w", + "llvm.mips.splati.b" => "__builtin_msa_splati_b", + "llvm.mips.splati.d" => "__builtin_msa_splati_d", + "llvm.mips.splati.h" => "__builtin_msa_splati_h", + "llvm.mips.splati.w" => "__builtin_msa_splati_w", + "llvm.mips.sra.b" => "__builtin_msa_sra_b", + "llvm.mips.sra.d" => "__builtin_msa_sra_d", + "llvm.mips.sra.h" => "__builtin_msa_sra_h", + "llvm.mips.sra.w" => "__builtin_msa_sra_w", + "llvm.mips.srai.b" => "__builtin_msa_srai_b", + "llvm.mips.srai.d" => "__builtin_msa_srai_d", + "llvm.mips.srai.h" => "__builtin_msa_srai_h", + "llvm.mips.srai.w" => "__builtin_msa_srai_w", + "llvm.mips.srar.b" => "__builtin_msa_srar_b", + "llvm.mips.srar.d" => "__builtin_msa_srar_d", + "llvm.mips.srar.h" => "__builtin_msa_srar_h", + "llvm.mips.srar.w" => "__builtin_msa_srar_w", + "llvm.mips.srari.b" => "__builtin_msa_srari_b", + "llvm.mips.srari.d" => "__builtin_msa_srari_d", + "llvm.mips.srari.h" => "__builtin_msa_srari_h", + "llvm.mips.srari.w" => "__builtin_msa_srari_w", + "llvm.mips.srl.b" => "__builtin_msa_srl_b", + "llvm.mips.srl.d" => "__builtin_msa_srl_d", + "llvm.mips.srl.h" => "__builtin_msa_srl_h", + "llvm.mips.srl.w" => "__builtin_msa_srl_w", + "llvm.mips.srli.b" => "__builtin_msa_srli_b", + "llvm.mips.srli.d" => "__builtin_msa_srli_d", + "llvm.mips.srli.h" => "__builtin_msa_srli_h", + "llvm.mips.srli.w" => "__builtin_msa_srli_w", + "llvm.mips.srlr.b" => "__builtin_msa_srlr_b", + 
"llvm.mips.srlr.d" => "__builtin_msa_srlr_d", + "llvm.mips.srlr.h" => "__builtin_msa_srlr_h", + "llvm.mips.srlr.w" => "__builtin_msa_srlr_w", + "llvm.mips.srlri.b" => "__builtin_msa_srlri_b", + "llvm.mips.srlri.d" => "__builtin_msa_srlri_d", + "llvm.mips.srlri.h" => "__builtin_msa_srlri_h", + "llvm.mips.srlri.w" => "__builtin_msa_srlri_w", + "llvm.mips.st.b" => "__builtin_msa_st_b", + "llvm.mips.st.d" => "__builtin_msa_st_d", + "llvm.mips.st.h" => "__builtin_msa_st_h", + "llvm.mips.st.w" => "__builtin_msa_st_w", + "llvm.mips.str.d" => "__builtin_msa_str_d", + "llvm.mips.str.w" => "__builtin_msa_str_w", + "llvm.mips.subq.ph" => "__builtin_mips_subq_ph", + "llvm.mips.subq.s.ph" => "__builtin_mips_subq_s_ph", + "llvm.mips.subq.s.w" => "__builtin_mips_subq_s_w", + "llvm.mips.subqh.ph" => "__builtin_mips_subqh_ph", + "llvm.mips.subqh.r.ph" => "__builtin_mips_subqh_r_ph", + "llvm.mips.subqh.r.w" => "__builtin_mips_subqh_r_w", + "llvm.mips.subqh.w" => "__builtin_mips_subqh_w", + "llvm.mips.subs.s.b" => "__builtin_msa_subs_s_b", + "llvm.mips.subs.s.d" => "__builtin_msa_subs_s_d", + "llvm.mips.subs.s.h" => "__builtin_msa_subs_s_h", + "llvm.mips.subs.s.w" => "__builtin_msa_subs_s_w", + "llvm.mips.subs.u.b" => "__builtin_msa_subs_u_b", + "llvm.mips.subs.u.d" => "__builtin_msa_subs_u_d", + "llvm.mips.subs.u.h" => "__builtin_msa_subs_u_h", + "llvm.mips.subs.u.w" => "__builtin_msa_subs_u_w", + "llvm.mips.subsus.u.b" => "__builtin_msa_subsus_u_b", + "llvm.mips.subsus.u.d" => "__builtin_msa_subsus_u_d", + "llvm.mips.subsus.u.h" => "__builtin_msa_subsus_u_h", + "llvm.mips.subsus.u.w" => "__builtin_msa_subsus_u_w", + "llvm.mips.subsuu.s.b" => "__builtin_msa_subsuu_s_b", + "llvm.mips.subsuu.s.d" => "__builtin_msa_subsuu_s_d", + "llvm.mips.subsuu.s.h" => "__builtin_msa_subsuu_s_h", + "llvm.mips.subsuu.s.w" => "__builtin_msa_subsuu_s_w", + "llvm.mips.subu.ph" => "__builtin_mips_subu_ph", + "llvm.mips.subu.qb" => "__builtin_mips_subu_qb", + "llvm.mips.subu.s.ph" => "__builtin_mips_subu_s_ph", + "llvm.mips.subu.s.qb" => "__builtin_mips_subu_s_qb", + "llvm.mips.subuh.qb" => "__builtin_mips_subuh_qb", + "llvm.mips.subuh.r.qb" => "__builtin_mips_subuh_r_qb", + "llvm.mips.subv.b" => "__builtin_msa_subv_b", + "llvm.mips.subv.d" => "__builtin_msa_subv_d", + "llvm.mips.subv.h" => "__builtin_msa_subv_h", + "llvm.mips.subv.w" => "__builtin_msa_subv_w", + "llvm.mips.subvi.b" => "__builtin_msa_subvi_b", + "llvm.mips.subvi.d" => "__builtin_msa_subvi_d", + "llvm.mips.subvi.h" => "__builtin_msa_subvi_h", + "llvm.mips.subvi.w" => "__builtin_msa_subvi_w", + "llvm.mips.vshf.b" => "__builtin_msa_vshf_b", + "llvm.mips.vshf.d" => "__builtin_msa_vshf_d", + "llvm.mips.vshf.h" => "__builtin_msa_vshf_h", + "llvm.mips.vshf.w" => "__builtin_msa_vshf_w", + "llvm.mips.wrdsp" => "__builtin_mips_wrdsp", + "llvm.mips.xor.v" => "__builtin_msa_xor_v", + "llvm.mips.xori.b" => "__builtin_msa_xori_b", + // nvvm + "llvm.nvvm.abs.bf16" => "__nvvm_abs_bf16", + "llvm.nvvm.abs.bf16x2" => "__nvvm_abs_bf16x2", + "llvm.nvvm.abs.i" => "__nvvm_abs_i", + "llvm.nvvm.abs.ll" => "__nvvm_abs_ll", + "llvm.nvvm.add.rm.d" => "__nvvm_add_rm_d", + "llvm.nvvm.add.rm.f" => "__nvvm_add_rm_f", + "llvm.nvvm.add.rm.ftz.f" => "__nvvm_add_rm_ftz_f", + "llvm.nvvm.add.rn.d" => "__nvvm_add_rn_d", + "llvm.nvvm.add.rn.f" => "__nvvm_add_rn_f", + "llvm.nvvm.add.rn.ftz.f" => "__nvvm_add_rn_ftz_f", + "llvm.nvvm.add.rp.d" => "__nvvm_add_rp_d", + "llvm.nvvm.add.rp.f" => "__nvvm_add_rp_f", + "llvm.nvvm.add.rp.ftz.f" => "__nvvm_add_rp_ftz_f", + "llvm.nvvm.add.rz.d" => "__nvvm_add_rz_d", 
+ "llvm.nvvm.add.rz.f" => "__nvvm_add_rz_f", + "llvm.nvvm.add.rz.ftz.f" => "__nvvm_add_rz_ftz_f", + "llvm.nvvm.bar.sync" => "__nvvm_bar_sync", + "llvm.nvvm.bar.warp.sync" => "__nvvm_bar_warp_sync", + "llvm.nvvm.barrier" => "__nvvm_bar", + "llvm.nvvm.barrier.n" => "__nvvm_bar_n", + "llvm.nvvm.barrier.sync" => "__nvvm_barrier_sync", + "llvm.nvvm.barrier.sync.cnt" => "__nvvm_barrier_sync_cnt", + "llvm.nvvm.barrier0" => "__syncthreads", + // [DUPLICATE]: "llvm.nvvm.barrier0" => "__nvvm_bar0", + "llvm.nvvm.barrier0.and" => "__nvvm_bar0_and", + "llvm.nvvm.barrier0.or" => "__nvvm_bar0_or", + "llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc", + "llvm.nvvm.bitcast.d2ll" => "__nvvm_bitcast_d2ll", + "llvm.nvvm.bitcast.f2i" => "__nvvm_bitcast_f2i", + "llvm.nvvm.bitcast.i2f" => "__nvvm_bitcast_i2f", + "llvm.nvvm.bitcast.ll2d" => "__nvvm_bitcast_ll2d", + "llvm.nvvm.brev32" => "__nvvm_brev32", + "llvm.nvvm.brev64" => "__nvvm_brev64", + "llvm.nvvm.ceil.d" => "__nvvm_ceil_d", + "llvm.nvvm.ceil.f" => "__nvvm_ceil_f", + "llvm.nvvm.ceil.ftz.f" => "__nvvm_ceil_ftz_f", + "llvm.nvvm.clz.i" => "__nvvm_clz_i", + "llvm.nvvm.clz.ll" => "__nvvm_clz_ll", + "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f", + "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f", + "llvm.nvvm.cp.async.ca.shared.global.16" => "__nvvm_cp_async_ca_shared_global_16", + "llvm.nvvm.cp.async.ca.shared.global.4" => "__nvvm_cp_async_ca_shared_global_4", + "llvm.nvvm.cp.async.ca.shared.global.8" => "__nvvm_cp_async_ca_shared_global_8", + "llvm.nvvm.cp.async.cg.shared.global.16" => "__nvvm_cp_async_cg_shared_global_16", + "llvm.nvvm.cp.async.commit.group" => "__nvvm_cp_async_commit_group", + "llvm.nvvm.cp.async.mbarrier.arrive" => "__nvvm_cp_async_mbarrier_arrive", + "llvm.nvvm.cp.async.mbarrier.arrive.noinc" => "__nvvm_cp_async_mbarrier_arrive_noinc", + "llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared" => "__nvvm_cp_async_mbarrier_arrive_noinc_shared", + "llvm.nvvm.cp.async.mbarrier.arrive.shared" => "__nvvm_cp_async_mbarrier_arrive_shared", + "llvm.nvvm.cp.async.wait.all" => "__nvvm_cp_async_wait_all", + "llvm.nvvm.cp.async.wait.group" => "__nvvm_cp_async_wait_group", + "llvm.nvvm.d2f.rm" => "__nvvm_d2f_rm", + "llvm.nvvm.d2f.rm.ftz" => "__nvvm_d2f_rm_ftz", + "llvm.nvvm.d2f.rn" => "__nvvm_d2f_rn", + "llvm.nvvm.d2f.rn.ftz" => "__nvvm_d2f_rn_ftz", + "llvm.nvvm.d2f.rp" => "__nvvm_d2f_rp", + "llvm.nvvm.d2f.rp.ftz" => "__nvvm_d2f_rp_ftz", + "llvm.nvvm.d2f.rz" => "__nvvm_d2f_rz", + "llvm.nvvm.d2f.rz.ftz" => "__nvvm_d2f_rz_ftz", + "llvm.nvvm.d2i.hi" => "__nvvm_d2i_hi", + "llvm.nvvm.d2i.lo" => "__nvvm_d2i_lo", + "llvm.nvvm.d2i.rm" => "__nvvm_d2i_rm", + "llvm.nvvm.d2i.rn" => "__nvvm_d2i_rn", + "llvm.nvvm.d2i.rp" => "__nvvm_d2i_rp", + "llvm.nvvm.d2i.rz" => "__nvvm_d2i_rz", + "llvm.nvvm.d2ll.rm" => "__nvvm_d2ll_rm", + "llvm.nvvm.d2ll.rn" => "__nvvm_d2ll_rn", + "llvm.nvvm.d2ll.rp" => "__nvvm_d2ll_rp", + "llvm.nvvm.d2ll.rz" => "__nvvm_d2ll_rz", + "llvm.nvvm.d2ui.rm" => "__nvvm_d2ui_rm", + "llvm.nvvm.d2ui.rn" => "__nvvm_d2ui_rn", + "llvm.nvvm.d2ui.rp" => "__nvvm_d2ui_rp", + "llvm.nvvm.d2ui.rz" => "__nvvm_d2ui_rz", + "llvm.nvvm.d2ull.rm" => "__nvvm_d2ull_rm", + "llvm.nvvm.d2ull.rn" => "__nvvm_d2ull_rn", + "llvm.nvvm.d2ull.rp" => "__nvvm_d2ull_rp", + "llvm.nvvm.d2ull.rz" => "__nvvm_d2ull_rz", + "llvm.nvvm.div.approx.f" => "__nvvm_div_approx_f", + "llvm.nvvm.div.approx.ftz.f" => "__nvvm_div_approx_ftz_f", + "llvm.nvvm.div.rm.d" => "__nvvm_div_rm_d", + "llvm.nvvm.div.rm.f" => "__nvvm_div_rm_f", + "llvm.nvvm.div.rm.ftz.f" => "__nvvm_div_rm_ftz_f", + 
"llvm.nvvm.div.rn.d" => "__nvvm_div_rn_d", + "llvm.nvvm.div.rn.f" => "__nvvm_div_rn_f", + "llvm.nvvm.div.rn.ftz.f" => "__nvvm_div_rn_ftz_f", + "llvm.nvvm.div.rp.d" => "__nvvm_div_rp_d", + "llvm.nvvm.div.rp.f" => "__nvvm_div_rp_f", + "llvm.nvvm.div.rp.ftz.f" => "__nvvm_div_rp_ftz_f", + "llvm.nvvm.div.rz.d" => "__nvvm_div_rz_d", + "llvm.nvvm.div.rz.f" => "__nvvm_div_rz_f", + "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f", + "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d", + "llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f", + "llvm.nvvm.ex2.approx.f16" => "__nvvm_ex2_approx_f16", + "llvm.nvvm.ex2.approx.f16x2" => "__nvvm_ex2_approx_f16x2", + "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f", + "llvm.nvvm.f2bf16.rn" => "__nvvm_f2bf16_rn", + "llvm.nvvm.f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu", + "llvm.nvvm.f2bf16.rz" => "__nvvm_f2bf16_rz", + "llvm.nvvm.f2bf16.rz.relu" => "__nvvm_f2bf16_rz_relu", + "llvm.nvvm.f2h.rn" => "__nvvm_f2h_rn", + "llvm.nvvm.f2h.rn.ftz" => "__nvvm_f2h_rn_ftz", + "llvm.nvvm.f2i.rm" => "__nvvm_f2i_rm", + "llvm.nvvm.f2i.rm.ftz" => "__nvvm_f2i_rm_ftz", + "llvm.nvvm.f2i.rn" => "__nvvm_f2i_rn", + "llvm.nvvm.f2i.rn.ftz" => "__nvvm_f2i_rn_ftz", + "llvm.nvvm.f2i.rp" => "__nvvm_f2i_rp", + "llvm.nvvm.f2i.rp.ftz" => "__nvvm_f2i_rp_ftz", + "llvm.nvvm.f2i.rz" => "__nvvm_f2i_rz", + "llvm.nvvm.f2i.rz.ftz" => "__nvvm_f2i_rz_ftz", + "llvm.nvvm.f2ll.rm" => "__nvvm_f2ll_rm", + "llvm.nvvm.f2ll.rm.ftz" => "__nvvm_f2ll_rm_ftz", + "llvm.nvvm.f2ll.rn" => "__nvvm_f2ll_rn", + "llvm.nvvm.f2ll.rn.ftz" => "__nvvm_f2ll_rn_ftz", + "llvm.nvvm.f2ll.rp" => "__nvvm_f2ll_rp", + "llvm.nvvm.f2ll.rp.ftz" => "__nvvm_f2ll_rp_ftz", + "llvm.nvvm.f2ll.rz" => "__nvvm_f2ll_rz", + "llvm.nvvm.f2ll.rz.ftz" => "__nvvm_f2ll_rz_ftz", + "llvm.nvvm.f2tf32.rna" => "__nvvm_f2tf32_rna", + "llvm.nvvm.f2ui.rm" => "__nvvm_f2ui_rm", + "llvm.nvvm.f2ui.rm.ftz" => "__nvvm_f2ui_rm_ftz", + "llvm.nvvm.f2ui.rn" => "__nvvm_f2ui_rn", + "llvm.nvvm.f2ui.rn.ftz" => "__nvvm_f2ui_rn_ftz", + "llvm.nvvm.f2ui.rp" => "__nvvm_f2ui_rp", + "llvm.nvvm.f2ui.rp.ftz" => "__nvvm_f2ui_rp_ftz", + "llvm.nvvm.f2ui.rz" => "__nvvm_f2ui_rz", + "llvm.nvvm.f2ui.rz.ftz" => "__nvvm_f2ui_rz_ftz", + "llvm.nvvm.f2ull.rm" => "__nvvm_f2ull_rm", + "llvm.nvvm.f2ull.rm.ftz" => "__nvvm_f2ull_rm_ftz", + "llvm.nvvm.f2ull.rn" => "__nvvm_f2ull_rn", + "llvm.nvvm.f2ull.rn.ftz" => "__nvvm_f2ull_rn_ftz", + "llvm.nvvm.f2ull.rp" => "__nvvm_f2ull_rp", + "llvm.nvvm.f2ull.rp.ftz" => "__nvvm_f2ull_rp_ftz", + "llvm.nvvm.f2ull.rz" => "__nvvm_f2ull_rz", + "llvm.nvvm.f2ull.rz.ftz" => "__nvvm_f2ull_rz_ftz", + "llvm.nvvm.fabs.d" => "__nvvm_fabs_d", + "llvm.nvvm.fabs.f" => "__nvvm_fabs_f", + "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f", + "llvm.nvvm.ff2bf16x2.rn" => "__nvvm_ff2bf16x2_rn", + "llvm.nvvm.ff2bf16x2.rn.relu" => "__nvvm_ff2bf16x2_rn_relu", + "llvm.nvvm.ff2bf16x2.rz" => "__nvvm_ff2bf16x2_rz", + "llvm.nvvm.ff2bf16x2.rz.relu" => "__nvvm_ff2bf16x2_rz_relu", + "llvm.nvvm.ff2f16x2.rn" => "__nvvm_ff2f16x2_rn", + "llvm.nvvm.ff2f16x2.rn.relu" => "__nvvm_ff2f16x2_rn_relu", + "llvm.nvvm.ff2f16x2.rz" => "__nvvm_ff2f16x2_rz", + "llvm.nvvm.ff2f16x2.rz.relu" => "__nvvm_ff2f16x2_rz_relu", + "llvm.nvvm.floor.d" => "__nvvm_floor_d", + "llvm.nvvm.floor.f" => "__nvvm_floor_f", + "llvm.nvvm.floor.ftz.f" => "__nvvm_floor_ftz_f", + "llvm.nvvm.fma.rm.d" => "__nvvm_fma_rm_d", + "llvm.nvvm.fma.rm.f" => "__nvvm_fma_rm_f", + "llvm.nvvm.fma.rm.ftz.f" => "__nvvm_fma_rm_ftz_f", + "llvm.nvvm.fma.rn.bf16" => "__nvvm_fma_rn_bf16", + "llvm.nvvm.fma.rn.bf16x2" => "__nvvm_fma_rn_bf16x2", + 
"llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d", + "llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f", + "llvm.nvvm.fma.rn.f16" => "__nvvm_fma_rn_f16", + "llvm.nvvm.fma.rn.f16x2" => "__nvvm_fma_rn_f16x2", + "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f", + "llvm.nvvm.fma.rn.ftz.f16" => "__nvvm_fma_rn_ftz_f16", + "llvm.nvvm.fma.rn.ftz.f16x2" => "__nvvm_fma_rn_ftz_f16x2", + "llvm.nvvm.fma.rn.ftz.relu.f16" => "__nvvm_fma_rn_ftz_relu_f16", + "llvm.nvvm.fma.rn.ftz.relu.f16x2" => "__nvvm_fma_rn_ftz_relu_f16x2", + "llvm.nvvm.fma.rn.ftz.sat.f16" => "__nvvm_fma_rn_ftz_sat_f16", + "llvm.nvvm.fma.rn.ftz.sat.f16x2" => "__nvvm_fma_rn_ftz_sat_f16x2", + "llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16", + "llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2", + "llvm.nvvm.fma.rn.relu.f16" => "__nvvm_fma_rn_relu_f16", + "llvm.nvvm.fma.rn.relu.f16x2" => "__nvvm_fma_rn_relu_f16x2", + "llvm.nvvm.fma.rn.sat.f16" => "__nvvm_fma_rn_sat_f16", + "llvm.nvvm.fma.rn.sat.f16x2" => "__nvvm_fma_rn_sat_f16x2", + "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d", + "llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f", + "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f", + "llvm.nvvm.fma.rz.d" => "__nvvm_fma_rz_d", + "llvm.nvvm.fma.rz.f" => "__nvvm_fma_rz_f", + "llvm.nvvm.fma.rz.ftz.f" => "__nvvm_fma_rz_ftz_f", + "llvm.nvvm.fmax.bf16" => "__nvvm_fmax_bf16", + "llvm.nvvm.fmax.bf16x2" => "__nvvm_fmax_bf16x2", + "llvm.nvvm.fmax.d" => "__nvvm_fmax_d", + "llvm.nvvm.fmax.f" => "__nvvm_fmax_f", + "llvm.nvvm.fmax.f16" => "__nvvm_fmax_f16", + "llvm.nvvm.fmax.f16x2" => "__nvvm_fmax_f16x2", + "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f", + "llvm.nvvm.fmax.ftz.f16" => "__nvvm_fmax_ftz_f16", + "llvm.nvvm.fmax.ftz.f16x2" => "__nvvm_fmax_ftz_f16x2", + "llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f", + "llvm.nvvm.fmax.ftz.nan.f16" => "__nvvm_fmax_ftz_nan_f16", + "llvm.nvvm.fmax.ftz.nan.f16x2" => "__nvvm_fmax_ftz_nan_f16x2", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f", + "llvm.nvvm.fmax.ftz.xorsign.abs.f16" => "__nvvm_fmax_ftz_xorsign_abs_f16", + "llvm.nvvm.fmax.ftz.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_xorsign_abs_f16x2", + "llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16", + "llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2", + "llvm.nvvm.fmax.nan.f" => "__nvvm_fmax_nan_f", + "llvm.nvvm.fmax.nan.f16" => "__nvvm_fmax_nan_f16", + "llvm.nvvm.fmax.nan.f16x2" => "__nvvm_fmax_nan_f16x2", + "llvm.nvvm.fmax.nan.xorsign.abs.bf16" => "__nvvm_fmax_nan_xorsign_abs_bf16", + "llvm.nvvm.fmax.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_nan_xorsign_abs_bf16x2", + "llvm.nvvm.fmax.nan.xorsign.abs.f" => "__nvvm_fmax_nan_xorsign_abs_f", + "llvm.nvvm.fmax.nan.xorsign.abs.f16" => "__nvvm_fmax_nan_xorsign_abs_f16", + "llvm.nvvm.fmax.nan.xorsign.abs.f16x2" => "__nvvm_fmax_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmax.xorsign.abs.bf16" => "__nvvm_fmax_xorsign_abs_bf16", + "llvm.nvvm.fmax.xorsign.abs.bf16x2" => "__nvvm_fmax_xorsign_abs_bf16x2", + "llvm.nvvm.fmax.xorsign.abs.f" => "__nvvm_fmax_xorsign_abs_f", + "llvm.nvvm.fmax.xorsign.abs.f16" => "__nvvm_fmax_xorsign_abs_f16", + "llvm.nvvm.fmax.xorsign.abs.f16x2" => "__nvvm_fmax_xorsign_abs_f16x2", + "llvm.nvvm.fmin.bf16" => "__nvvm_fmin_bf16", + "llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2", + "llvm.nvvm.fmin.d" => "__nvvm_fmin_d", + 
"llvm.nvvm.fmin.f" => "__nvvm_fmin_f", + "llvm.nvvm.fmin.f16" => "__nvvm_fmin_f16", + "llvm.nvvm.fmin.f16x2" => "__nvvm_fmin_f16x2", + "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f", + "llvm.nvvm.fmin.ftz.f16" => "__nvvm_fmin_ftz_f16", + "llvm.nvvm.fmin.ftz.f16x2" => "__nvvm_fmin_ftz_f16x2", + "llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f", + "llvm.nvvm.fmin.ftz.nan.f16" => "__nvvm_fmin_ftz_nan_f16", + "llvm.nvvm.fmin.ftz.nan.f16x2" => "__nvvm_fmin_ftz_nan_f16x2", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f", + "llvm.nvvm.fmin.ftz.xorsign.abs.f16" => "__nvvm_fmin_ftz_xorsign_abs_f16", + "llvm.nvvm.fmin.ftz.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_xorsign_abs_f16x2", + "llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16", + "llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2", + "llvm.nvvm.fmin.nan.f" => "__nvvm_fmin_nan_f", + "llvm.nvvm.fmin.nan.f16" => "__nvvm_fmin_nan_f16", + "llvm.nvvm.fmin.nan.f16x2" => "__nvvm_fmin_nan_f16x2", + "llvm.nvvm.fmin.nan.xorsign.abs.bf16" => "__nvvm_fmin_nan_xorsign_abs_bf16", + "llvm.nvvm.fmin.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_nan_xorsign_abs_bf16x2", + "llvm.nvvm.fmin.nan.xorsign.abs.f" => "__nvvm_fmin_nan_xorsign_abs_f", + "llvm.nvvm.fmin.nan.xorsign.abs.f16" => "__nvvm_fmin_nan_xorsign_abs_f16", + "llvm.nvvm.fmin.nan.xorsign.abs.f16x2" => "__nvvm_fmin_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmin.xorsign.abs.bf16" => "__nvvm_fmin_xorsign_abs_bf16", + "llvm.nvvm.fmin.xorsign.abs.bf16x2" => "__nvvm_fmin_xorsign_abs_bf16x2", + "llvm.nvvm.fmin.xorsign.abs.f" => "__nvvm_fmin_xorsign_abs_f", + "llvm.nvvm.fmin.xorsign.abs.f16" => "__nvvm_fmin_xorsign_abs_f16", + "llvm.nvvm.fmin.xorsign.abs.f16x2" => "__nvvm_fmin_xorsign_abs_f16x2", + "llvm.nvvm.fns" => "__nvvm_fns", + "llvm.nvvm.h2f" => "__nvvm_h2f", + "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm", + "llvm.nvvm.i2d.rn" => "__nvvm_i2d_rn", + "llvm.nvvm.i2d.rp" => "__nvvm_i2d_rp", + "llvm.nvvm.i2d.rz" => "__nvvm_i2d_rz", + "llvm.nvvm.i2f.rm" => "__nvvm_i2f_rm", + "llvm.nvvm.i2f.rn" => "__nvvm_i2f_rn", + "llvm.nvvm.i2f.rp" => "__nvvm_i2f_rp", + "llvm.nvvm.i2f.rz" => "__nvvm_i2f_rz", + "llvm.nvvm.isspacep.const" => "__nvvm_isspacep_const", + "llvm.nvvm.isspacep.global" => "__nvvm_isspacep_global", + "llvm.nvvm.isspacep.local" => "__nvvm_isspacep_local", + "llvm.nvvm.isspacep.shared" => "__nvvm_isspacep_shared", + "llvm.nvvm.istypep.sampler" => "__nvvm_istypep_sampler", + "llvm.nvvm.istypep.surface" => "__nvvm_istypep_surface", + "llvm.nvvm.istypep.texture" => "__nvvm_istypep_texture", + "llvm.nvvm.lg2.approx.d" => "__nvvm_lg2_approx_d", + "llvm.nvvm.lg2.approx.f" => "__nvvm_lg2_approx_f", + "llvm.nvvm.lg2.approx.ftz.f" => "__nvvm_lg2_approx_ftz_f", + "llvm.nvvm.ll2d.rm" => "__nvvm_ll2d_rm", + "llvm.nvvm.ll2d.rn" => "__nvvm_ll2d_rn", + "llvm.nvvm.ll2d.rp" => "__nvvm_ll2d_rp", + "llvm.nvvm.ll2d.rz" => "__nvvm_ll2d_rz", + "llvm.nvvm.ll2f.rm" => "__nvvm_ll2f_rm", + "llvm.nvvm.ll2f.rn" => "__nvvm_ll2f_rn", + "llvm.nvvm.ll2f.rp" => "__nvvm_ll2f_rp", + "llvm.nvvm.ll2f.rz" => "__nvvm_ll2f_rz", + "llvm.nvvm.lohi.i2d" => "__nvvm_lohi_i2d", + "llvm.nvvm.match.any.sync.i32" => "__nvvm_match_any_sync_i32", + "llvm.nvvm.match.any.sync.i64" => "__nvvm_match_any_sync_i64", + "llvm.nvvm.max.i" => "__nvvm_max_i", + "llvm.nvvm.max.ll" => "__nvvm_max_ll", + 
"llvm.nvvm.max.ui" => "__nvvm_max_ui", + "llvm.nvvm.max.ull" => "__nvvm_max_ull", + "llvm.nvvm.mbarrier.arrive" => "__nvvm_mbarrier_arrive", + "llvm.nvvm.mbarrier.arrive.drop" => "__nvvm_mbarrier_arrive_drop", + "llvm.nvvm.mbarrier.arrive.drop.noComplete" => "__nvvm_mbarrier_arrive_drop_noComplete", + "llvm.nvvm.mbarrier.arrive.drop.noComplete.shared" => "__nvvm_mbarrier_arrive_drop_noComplete_shared", + "llvm.nvvm.mbarrier.arrive.drop.shared" => "__nvvm_mbarrier_arrive_drop_shared", + "llvm.nvvm.mbarrier.arrive.noComplete" => "__nvvm_mbarrier_arrive_noComplete", + "llvm.nvvm.mbarrier.arrive.noComplete.shared" => "__nvvm_mbarrier_arrive_noComplete_shared", + "llvm.nvvm.mbarrier.arrive.shared" => "__nvvm_mbarrier_arrive_shared", + "llvm.nvvm.mbarrier.init" => "__nvvm_mbarrier_init", + "llvm.nvvm.mbarrier.init.shared" => "__nvvm_mbarrier_init_shared", + "llvm.nvvm.mbarrier.inval" => "__nvvm_mbarrier_inval", + "llvm.nvvm.mbarrier.inval.shared" => "__nvvm_mbarrier_inval_shared", + "llvm.nvvm.mbarrier.pending.count" => "__nvvm_mbarrier_pending_count", + "llvm.nvvm.mbarrier.test.wait" => "__nvvm_mbarrier_test_wait", + "llvm.nvvm.mbarrier.test.wait.shared" => "__nvvm_mbarrier_test_wait_shared", + "llvm.nvvm.membar.cta" => "__nvvm_membar_cta", + "llvm.nvvm.membar.gl" => "__nvvm_membar_gl", + "llvm.nvvm.membar.sys" => "__nvvm_membar_sys", + "llvm.nvvm.min.i" => "__nvvm_min_i", + "llvm.nvvm.min.ll" => "__nvvm_min_ll", + "llvm.nvvm.min.ui" => "__nvvm_min_ui", + "llvm.nvvm.min.ull" => "__nvvm_min_ull", + "llvm.nvvm.mul.rm.d" => "__nvvm_mul_rm_d", + "llvm.nvvm.mul.rm.f" => "__nvvm_mul_rm_f", + "llvm.nvvm.mul.rm.ftz.f" => "__nvvm_mul_rm_ftz_f", + "llvm.nvvm.mul.rn.d" => "__nvvm_mul_rn_d", + "llvm.nvvm.mul.rn.f" => "__nvvm_mul_rn_f", + "llvm.nvvm.mul.rn.ftz.f" => "__nvvm_mul_rn_ftz_f", + "llvm.nvvm.mul.rp.d" => "__nvvm_mul_rp_d", + "llvm.nvvm.mul.rp.f" => "__nvvm_mul_rp_f", + "llvm.nvvm.mul.rp.ftz.f" => "__nvvm_mul_rp_ftz_f", + "llvm.nvvm.mul.rz.d" => "__nvvm_mul_rz_d", + "llvm.nvvm.mul.rz.f" => "__nvvm_mul_rz_f", + "llvm.nvvm.mul.rz.ftz.f" => "__nvvm_mul_rz_ftz_f", + "llvm.nvvm.mul24.i" => "__nvvm_mul24_i", + "llvm.nvvm.mul24.ui" => "__nvvm_mul24_ui", + "llvm.nvvm.mulhi.i" => "__nvvm_mulhi_i", + "llvm.nvvm.mulhi.ll" => "__nvvm_mulhi_ll", + "llvm.nvvm.mulhi.ui" => "__nvvm_mulhi_ui", + "llvm.nvvm.mulhi.ull" => "__nvvm_mulhi_ull", + "llvm.nvvm.neg.bf16" => "__nvvm_neg_bf16", + "llvm.nvvm.neg.bf16x2" => "__nvvm_neg_bf16x2", + "llvm.nvvm.popc.i" => "__nvvm_popc_i", + "llvm.nvvm.popc.ll" => "__nvvm_popc_ll", + "llvm.nvvm.prmt" => "__nvvm_prmt", + "llvm.nvvm.rcp.approx.ftz.d" => "__nvvm_rcp_approx_ftz_d", + "llvm.nvvm.rcp.approx.ftz.f" => "__nvvm_rcp_approx_ftz_f", + "llvm.nvvm.rcp.rm.d" => "__nvvm_rcp_rm_d", + "llvm.nvvm.rcp.rm.f" => "__nvvm_rcp_rm_f", + "llvm.nvvm.rcp.rm.ftz.f" => "__nvvm_rcp_rm_ftz_f", + "llvm.nvvm.rcp.rn.d" => "__nvvm_rcp_rn_d", + "llvm.nvvm.rcp.rn.f" => "__nvvm_rcp_rn_f", + "llvm.nvvm.rcp.rn.ftz.f" => "__nvvm_rcp_rn_ftz_f", + "llvm.nvvm.rcp.rp.d" => "__nvvm_rcp_rp_d", + "llvm.nvvm.rcp.rp.f" => "__nvvm_rcp_rp_f", + "llvm.nvvm.rcp.rp.ftz.f" => "__nvvm_rcp_rp_ftz_f", + "llvm.nvvm.rcp.rz.d" => "__nvvm_rcp_rz_d", + "llvm.nvvm.rcp.rz.f" => "__nvvm_rcp_rz_f", + "llvm.nvvm.rcp.rz.ftz.f" => "__nvvm_rcp_rz_ftz_f", + "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_clock", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_clock64", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock64" => 
"__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.ctaid.w" => "__nvvm_read_ptx_sreg_ctaid_w", + "llvm.nvvm.read.ptx.sreg.ctaid.x" => "__nvvm_read_ptx_sreg_ctaid_x", + "llvm.nvvm.read.ptx.sreg.ctaid.y" => "__nvvm_read_ptx_sreg_ctaid_y", + "llvm.nvvm.read.ptx.sreg.ctaid.z" => "__nvvm_read_ptx_sreg_ctaid_z", + "llvm.nvvm.read.ptx.sreg.envreg0" => "__nvvm_read_ptx_sreg_envreg0", + "llvm.nvvm.read.ptx.sreg.envreg1" => "__nvvm_read_ptx_sreg_envreg1", + "llvm.nvvm.read.ptx.sreg.envreg10" => "__nvvm_read_ptx_sreg_envreg10", + "llvm.nvvm.read.ptx.sreg.envreg11" => "__nvvm_read_ptx_sreg_envreg11", + "llvm.nvvm.read.ptx.sreg.envreg12" => "__nvvm_read_ptx_sreg_envreg12", + "llvm.nvvm.read.ptx.sreg.envreg13" => "__nvvm_read_ptx_sreg_envreg13", + "llvm.nvvm.read.ptx.sreg.envreg14" => "__nvvm_read_ptx_sreg_envreg14", + "llvm.nvvm.read.ptx.sreg.envreg15" => "__nvvm_read_ptx_sreg_envreg15", + "llvm.nvvm.read.ptx.sreg.envreg16" => "__nvvm_read_ptx_sreg_envreg16", + "llvm.nvvm.read.ptx.sreg.envreg17" => "__nvvm_read_ptx_sreg_envreg17", + "llvm.nvvm.read.ptx.sreg.envreg18" => "__nvvm_read_ptx_sreg_envreg18", + "llvm.nvvm.read.ptx.sreg.envreg19" => "__nvvm_read_ptx_sreg_envreg19", + "llvm.nvvm.read.ptx.sreg.envreg2" => "__nvvm_read_ptx_sreg_envreg2", + "llvm.nvvm.read.ptx.sreg.envreg20" => "__nvvm_read_ptx_sreg_envreg20", + "llvm.nvvm.read.ptx.sreg.envreg21" => "__nvvm_read_ptx_sreg_envreg21", + "llvm.nvvm.read.ptx.sreg.envreg22" => "__nvvm_read_ptx_sreg_envreg22", + "llvm.nvvm.read.ptx.sreg.envreg23" => "__nvvm_read_ptx_sreg_envreg23", + "llvm.nvvm.read.ptx.sreg.envreg24" => "__nvvm_read_ptx_sreg_envreg24", + "llvm.nvvm.read.ptx.sreg.envreg25" => "__nvvm_read_ptx_sreg_envreg25", + "llvm.nvvm.read.ptx.sreg.envreg26" => "__nvvm_read_ptx_sreg_envreg26", + "llvm.nvvm.read.ptx.sreg.envreg27" => "__nvvm_read_ptx_sreg_envreg27", + "llvm.nvvm.read.ptx.sreg.envreg28" => "__nvvm_read_ptx_sreg_envreg28", + "llvm.nvvm.read.ptx.sreg.envreg29" => "__nvvm_read_ptx_sreg_envreg29", + "llvm.nvvm.read.ptx.sreg.envreg3" => "__nvvm_read_ptx_sreg_envreg3", + "llvm.nvvm.read.ptx.sreg.envreg30" => "__nvvm_read_ptx_sreg_envreg30", + "llvm.nvvm.read.ptx.sreg.envreg31" => "__nvvm_read_ptx_sreg_envreg31", + "llvm.nvvm.read.ptx.sreg.envreg4" => "__nvvm_read_ptx_sreg_envreg4", + "llvm.nvvm.read.ptx.sreg.envreg5" => "__nvvm_read_ptx_sreg_envreg5", + "llvm.nvvm.read.ptx.sreg.envreg6" => "__nvvm_read_ptx_sreg_envreg6", + "llvm.nvvm.read.ptx.sreg.envreg7" => "__nvvm_read_ptx_sreg_envreg7", + "llvm.nvvm.read.ptx.sreg.envreg8" => "__nvvm_read_ptx_sreg_envreg8", + "llvm.nvvm.read.ptx.sreg.envreg9" => "__nvvm_read_ptx_sreg_envreg9", + "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_gridid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_laneid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_lanemask_eq", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_lanemask_ge", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_lanemask_gt", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_lanemask_le", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.le" => 
"__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_lanemask_lt", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nctaid.w" => "__nvvm_read_ptx_sreg_nctaid_w", + "llvm.nvvm.read.ptx.sreg.nctaid.x" => "__nvvm_read_ptx_sreg_nctaid_x", + "llvm.nvvm.read.ptx.sreg.nctaid.y" => "__nvvm_read_ptx_sreg_nctaid_y", + "llvm.nvvm.read.ptx.sreg.nctaid.z" => "__nvvm_read_ptx_sreg_nctaid_z", + "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_nsmid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.ntid.w" => "__nvvm_read_ptx_sreg_ntid_w", + "llvm.nvvm.read.ptx.sreg.ntid.x" => "__nvvm_read_ptx_sreg_ntid_x", + "llvm.nvvm.read.ptx.sreg.ntid.y" => "__nvvm_read_ptx_sreg_ntid_y", + "llvm.nvvm.read.ptx.sreg.ntid.z" => "__nvvm_read_ptx_sreg_ntid_z", + "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_nwarpid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_pm0", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_pm1", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_pm2", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_pm3", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_smid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.tid.w" => "__nvvm_read_ptx_sreg_tid_w", + "llvm.nvvm.read.ptx.sreg.tid.x" => "__nvvm_read_ptx_sreg_tid_x", + "llvm.nvvm.read.ptx.sreg.tid.y" => "__nvvm_read_ptx_sreg_tid_y", + "llvm.nvvm.read.ptx.sreg.tid.z" => "__nvvm_read_ptx_sreg_tid_z", + "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_warpid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_warpsize", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.redux.sync.add" => "__nvvm_redux_sync_add", + "llvm.nvvm.redux.sync.and" => "__nvvm_redux_sync_and", + "llvm.nvvm.redux.sync.max" => "__nvvm_redux_sync_max", + "llvm.nvvm.redux.sync.min" => "__nvvm_redux_sync_min", + "llvm.nvvm.redux.sync.or" => "__nvvm_redux_sync_or", + "llvm.nvvm.redux.sync.umax" => "__nvvm_redux_sync_umax", + "llvm.nvvm.redux.sync.umin" => "__nvvm_redux_sync_umin", + "llvm.nvvm.redux.sync.xor" => "__nvvm_redux_sync_xor", + "llvm.nvvm.rotate.b32" => "__nvvm_rotate_b32", + "llvm.nvvm.rotate.b64" => "__nvvm_rotate_b64", + "llvm.nvvm.rotate.right.b64" => "__nvvm_rotate_right_b64", + "llvm.nvvm.round.d" => "__nvvm_round_d", + "llvm.nvvm.round.f" => "__nvvm_round_f", + "llvm.nvvm.round.ftz.f" => "__nvvm_round_ftz_f", + "llvm.nvvm.rsqrt.approx.d" => "__nvvm_rsqrt_approx_d", + "llvm.nvvm.rsqrt.approx.f" => "__nvvm_rsqrt_approx_f", + "llvm.nvvm.rsqrt.approx.ftz.f" => "__nvvm_rsqrt_approx_ftz_f", + "llvm.nvvm.sad.i" => "__nvvm_sad_i", + "llvm.nvvm.sad.ui" => "__nvvm_sad_ui", + "llvm.nvvm.saturate.d" => "__nvvm_saturate_d", + "llvm.nvvm.saturate.f" => "__nvvm_saturate_f", + "llvm.nvvm.saturate.ftz.f" => "__nvvm_saturate_ftz_f", + "llvm.nvvm.shfl.bfly.f32" => "__nvvm_shfl_bfly_f32", + 
"llvm.nvvm.shfl.bfly.i32" => "__nvvm_shfl_bfly_i32", + "llvm.nvvm.shfl.down.f32" => "__nvvm_shfl_down_f32", + "llvm.nvvm.shfl.down.i32" => "__nvvm_shfl_down_i32", + "llvm.nvvm.shfl.idx.f32" => "__nvvm_shfl_idx_f32", + "llvm.nvvm.shfl.idx.i32" => "__nvvm_shfl_idx_i32", + "llvm.nvvm.shfl.sync.bfly.f32" => "__nvvm_shfl_sync_bfly_f32", + "llvm.nvvm.shfl.sync.bfly.i32" => "__nvvm_shfl_sync_bfly_i32", + "llvm.nvvm.shfl.sync.down.f32" => "__nvvm_shfl_sync_down_f32", + "llvm.nvvm.shfl.sync.down.i32" => "__nvvm_shfl_sync_down_i32", + "llvm.nvvm.shfl.sync.idx.f32" => "__nvvm_shfl_sync_idx_f32", + "llvm.nvvm.shfl.sync.idx.i32" => "__nvvm_shfl_sync_idx_i32", + "llvm.nvvm.shfl.sync.up.f32" => "__nvvm_shfl_sync_up_f32", + "llvm.nvvm.shfl.sync.up.i32" => "__nvvm_shfl_sync_up_i32", + "llvm.nvvm.shfl.up.f32" => "__nvvm_shfl_up_f32", + "llvm.nvvm.shfl.up.i32" => "__nvvm_shfl_up_i32", + "llvm.nvvm.sin.approx.f" => "__nvvm_sin_approx_f", + "llvm.nvvm.sin.approx.ftz.f" => "__nvvm_sin_approx_ftz_f", + "llvm.nvvm.sqrt.approx.f" => "__nvvm_sqrt_approx_f", + "llvm.nvvm.sqrt.approx.ftz.f" => "__nvvm_sqrt_approx_ftz_f", + "llvm.nvvm.sqrt.f" => "__nvvm_sqrt_f", + "llvm.nvvm.sqrt.rm.d" => "__nvvm_sqrt_rm_d", + "llvm.nvvm.sqrt.rm.f" => "__nvvm_sqrt_rm_f", + "llvm.nvvm.sqrt.rm.ftz.f" => "__nvvm_sqrt_rm_ftz_f", + "llvm.nvvm.sqrt.rn.d" => "__nvvm_sqrt_rn_d", + "llvm.nvvm.sqrt.rn.f" => "__nvvm_sqrt_rn_f", + "llvm.nvvm.sqrt.rn.ftz.f" => "__nvvm_sqrt_rn_ftz_f", + "llvm.nvvm.sqrt.rp.d" => "__nvvm_sqrt_rp_d", + "llvm.nvvm.sqrt.rp.f" => "__nvvm_sqrt_rp_f", + "llvm.nvvm.sqrt.rp.ftz.f" => "__nvvm_sqrt_rp_ftz_f", + "llvm.nvvm.sqrt.rz.d" => "__nvvm_sqrt_rz_d", + "llvm.nvvm.sqrt.rz.f" => "__nvvm_sqrt_rz_f", + "llvm.nvvm.sqrt.rz.ftz.f" => "__nvvm_sqrt_rz_ftz_f", + "llvm.nvvm.suq.array.size" => "__nvvm_suq_array_size", + "llvm.nvvm.suq.channel.data.type" => "__nvvm_suq_channel_data_type", + "llvm.nvvm.suq.channel.order" => "__nvvm_suq_channel_order", + "llvm.nvvm.suq.depth" => "__nvvm_suq_depth", + "llvm.nvvm.suq.height" => "__nvvm_suq_height", + "llvm.nvvm.suq.width" => "__nvvm_suq_width", + "llvm.nvvm.sust.b.1d.array.i16.clamp" => "__nvvm_sust_b_1d_array_i16_clamp", + "llvm.nvvm.sust.b.1d.array.i16.trap" => "__nvvm_sust_b_1d_array_i16_trap", + "llvm.nvvm.sust.b.1d.array.i16.zero" => "__nvvm_sust_b_1d_array_i16_zero", + "llvm.nvvm.sust.b.1d.array.i32.clamp" => "__nvvm_sust_b_1d_array_i32_clamp", + "llvm.nvvm.sust.b.1d.array.i32.trap" => "__nvvm_sust_b_1d_array_i32_trap", + "llvm.nvvm.sust.b.1d.array.i32.zero" => "__nvvm_sust_b_1d_array_i32_zero", + "llvm.nvvm.sust.b.1d.array.i64.clamp" => "__nvvm_sust_b_1d_array_i64_clamp", + "llvm.nvvm.sust.b.1d.array.i64.trap" => "__nvvm_sust_b_1d_array_i64_trap", + "llvm.nvvm.sust.b.1d.array.i64.zero" => "__nvvm_sust_b_1d_array_i64_zero", + "llvm.nvvm.sust.b.1d.array.i8.clamp" => "__nvvm_sust_b_1d_array_i8_clamp", + "llvm.nvvm.sust.b.1d.array.i8.trap" => "__nvvm_sust_b_1d_array_i8_trap", + "llvm.nvvm.sust.b.1d.array.i8.zero" => "__nvvm_sust_b_1d_array_i8_zero", + "llvm.nvvm.sust.b.1d.array.v2i16.clamp" => "__nvvm_sust_b_1d_array_v2i16_clamp", + "llvm.nvvm.sust.b.1d.array.v2i16.trap" => "__nvvm_sust_b_1d_array_v2i16_trap", + "llvm.nvvm.sust.b.1d.array.v2i16.zero" => "__nvvm_sust_b_1d_array_v2i16_zero", + "llvm.nvvm.sust.b.1d.array.v2i32.clamp" => "__nvvm_sust_b_1d_array_v2i32_clamp", + "llvm.nvvm.sust.b.1d.array.v2i32.trap" => "__nvvm_sust_b_1d_array_v2i32_trap", + "llvm.nvvm.sust.b.1d.array.v2i32.zero" => "__nvvm_sust_b_1d_array_v2i32_zero", + "llvm.nvvm.sust.b.1d.array.v2i64.clamp" => 
"__nvvm_sust_b_1d_array_v2i64_clamp", + "llvm.nvvm.sust.b.1d.array.v2i64.trap" => "__nvvm_sust_b_1d_array_v2i64_trap", + "llvm.nvvm.sust.b.1d.array.v2i64.zero" => "__nvvm_sust_b_1d_array_v2i64_zero", + "llvm.nvvm.sust.b.1d.array.v2i8.clamp" => "__nvvm_sust_b_1d_array_v2i8_clamp", + "llvm.nvvm.sust.b.1d.array.v2i8.trap" => "__nvvm_sust_b_1d_array_v2i8_trap", + "llvm.nvvm.sust.b.1d.array.v2i8.zero" => "__nvvm_sust_b_1d_array_v2i8_zero", + "llvm.nvvm.sust.b.1d.array.v4i16.clamp" => "__nvvm_sust_b_1d_array_v4i16_clamp", + "llvm.nvvm.sust.b.1d.array.v4i16.trap" => "__nvvm_sust_b_1d_array_v4i16_trap", + "llvm.nvvm.sust.b.1d.array.v4i16.zero" => "__nvvm_sust_b_1d_array_v4i16_zero", + "llvm.nvvm.sust.b.1d.array.v4i32.clamp" => "__nvvm_sust_b_1d_array_v4i32_clamp", + "llvm.nvvm.sust.b.1d.array.v4i32.trap" => "__nvvm_sust_b_1d_array_v4i32_trap", + "llvm.nvvm.sust.b.1d.array.v4i32.zero" => "__nvvm_sust_b_1d_array_v4i32_zero", + "llvm.nvvm.sust.b.1d.array.v4i8.clamp" => "__nvvm_sust_b_1d_array_v4i8_clamp", + "llvm.nvvm.sust.b.1d.array.v4i8.trap" => "__nvvm_sust_b_1d_array_v4i8_trap", + "llvm.nvvm.sust.b.1d.array.v4i8.zero" => "__nvvm_sust_b_1d_array_v4i8_zero", + "llvm.nvvm.sust.b.1d.i16.clamp" => "__nvvm_sust_b_1d_i16_clamp", + "llvm.nvvm.sust.b.1d.i16.trap" => "__nvvm_sust_b_1d_i16_trap", + "llvm.nvvm.sust.b.1d.i16.zero" => "__nvvm_sust_b_1d_i16_zero", + "llvm.nvvm.sust.b.1d.i32.clamp" => "__nvvm_sust_b_1d_i32_clamp", + "llvm.nvvm.sust.b.1d.i32.trap" => "__nvvm_sust_b_1d_i32_trap", + "llvm.nvvm.sust.b.1d.i32.zero" => "__nvvm_sust_b_1d_i32_zero", + "llvm.nvvm.sust.b.1d.i64.clamp" => "__nvvm_sust_b_1d_i64_clamp", + "llvm.nvvm.sust.b.1d.i64.trap" => "__nvvm_sust_b_1d_i64_trap", + "llvm.nvvm.sust.b.1d.i64.zero" => "__nvvm_sust_b_1d_i64_zero", + "llvm.nvvm.sust.b.1d.i8.clamp" => "__nvvm_sust_b_1d_i8_clamp", + "llvm.nvvm.sust.b.1d.i8.trap" => "__nvvm_sust_b_1d_i8_trap", + "llvm.nvvm.sust.b.1d.i8.zero" => "__nvvm_sust_b_1d_i8_zero", + "llvm.nvvm.sust.b.1d.v2i16.clamp" => "__nvvm_sust_b_1d_v2i16_clamp", + "llvm.nvvm.sust.b.1d.v2i16.trap" => "__nvvm_sust_b_1d_v2i16_trap", + "llvm.nvvm.sust.b.1d.v2i16.zero" => "__nvvm_sust_b_1d_v2i16_zero", + "llvm.nvvm.sust.b.1d.v2i32.clamp" => "__nvvm_sust_b_1d_v2i32_clamp", + "llvm.nvvm.sust.b.1d.v2i32.trap" => "__nvvm_sust_b_1d_v2i32_trap", + "llvm.nvvm.sust.b.1d.v2i32.zero" => "__nvvm_sust_b_1d_v2i32_zero", + "llvm.nvvm.sust.b.1d.v2i64.clamp" => "__nvvm_sust_b_1d_v2i64_clamp", + "llvm.nvvm.sust.b.1d.v2i64.trap" => "__nvvm_sust_b_1d_v2i64_trap", + "llvm.nvvm.sust.b.1d.v2i64.zero" => "__nvvm_sust_b_1d_v2i64_zero", + "llvm.nvvm.sust.b.1d.v2i8.clamp" => "__nvvm_sust_b_1d_v2i8_clamp", + "llvm.nvvm.sust.b.1d.v2i8.trap" => "__nvvm_sust_b_1d_v2i8_trap", + "llvm.nvvm.sust.b.1d.v2i8.zero" => "__nvvm_sust_b_1d_v2i8_zero", + "llvm.nvvm.sust.b.1d.v4i16.clamp" => "__nvvm_sust_b_1d_v4i16_clamp", + "llvm.nvvm.sust.b.1d.v4i16.trap" => "__nvvm_sust_b_1d_v4i16_trap", + "llvm.nvvm.sust.b.1d.v4i16.zero" => "__nvvm_sust_b_1d_v4i16_zero", + "llvm.nvvm.sust.b.1d.v4i32.clamp" => "__nvvm_sust_b_1d_v4i32_clamp", + "llvm.nvvm.sust.b.1d.v4i32.trap" => "__nvvm_sust_b_1d_v4i32_trap", + "llvm.nvvm.sust.b.1d.v4i32.zero" => "__nvvm_sust_b_1d_v4i32_zero", + "llvm.nvvm.sust.b.1d.v4i8.clamp" => "__nvvm_sust_b_1d_v4i8_clamp", + "llvm.nvvm.sust.b.1d.v4i8.trap" => "__nvvm_sust_b_1d_v4i8_trap", + "llvm.nvvm.sust.b.1d.v4i8.zero" => "__nvvm_sust_b_1d_v4i8_zero", + "llvm.nvvm.sust.b.2d.array.i16.clamp" => "__nvvm_sust_b_2d_array_i16_clamp", + "llvm.nvvm.sust.b.2d.array.i16.trap" => 
"__nvvm_sust_b_2d_array_i16_trap", + "llvm.nvvm.sust.b.2d.array.i16.zero" => "__nvvm_sust_b_2d_array_i16_zero", + "llvm.nvvm.sust.b.2d.array.i32.clamp" => "__nvvm_sust_b_2d_array_i32_clamp", + "llvm.nvvm.sust.b.2d.array.i32.trap" => "__nvvm_sust_b_2d_array_i32_trap", + "llvm.nvvm.sust.b.2d.array.i32.zero" => "__nvvm_sust_b_2d_array_i32_zero", + "llvm.nvvm.sust.b.2d.array.i64.clamp" => "__nvvm_sust_b_2d_array_i64_clamp", + "llvm.nvvm.sust.b.2d.array.i64.trap" => "__nvvm_sust_b_2d_array_i64_trap", + "llvm.nvvm.sust.b.2d.array.i64.zero" => "__nvvm_sust_b_2d_array_i64_zero", + "llvm.nvvm.sust.b.2d.array.i8.clamp" => "__nvvm_sust_b_2d_array_i8_clamp", + "llvm.nvvm.sust.b.2d.array.i8.trap" => "__nvvm_sust_b_2d_array_i8_trap", + "llvm.nvvm.sust.b.2d.array.i8.zero" => "__nvvm_sust_b_2d_array_i8_zero", + "llvm.nvvm.sust.b.2d.array.v2i16.clamp" => "__nvvm_sust_b_2d_array_v2i16_clamp", + "llvm.nvvm.sust.b.2d.array.v2i16.trap" => "__nvvm_sust_b_2d_array_v2i16_trap", + "llvm.nvvm.sust.b.2d.array.v2i16.zero" => "__nvvm_sust_b_2d_array_v2i16_zero", + "llvm.nvvm.sust.b.2d.array.v2i32.clamp" => "__nvvm_sust_b_2d_array_v2i32_clamp", + "llvm.nvvm.sust.b.2d.array.v2i32.trap" => "__nvvm_sust_b_2d_array_v2i32_trap", + "llvm.nvvm.sust.b.2d.array.v2i32.zero" => "__nvvm_sust_b_2d_array_v2i32_zero", + "llvm.nvvm.sust.b.2d.array.v2i64.clamp" => "__nvvm_sust_b_2d_array_v2i64_clamp", + "llvm.nvvm.sust.b.2d.array.v2i64.trap" => "__nvvm_sust_b_2d_array_v2i64_trap", + "llvm.nvvm.sust.b.2d.array.v2i64.zero" => "__nvvm_sust_b_2d_array_v2i64_zero", + "llvm.nvvm.sust.b.2d.array.v2i8.clamp" => "__nvvm_sust_b_2d_array_v2i8_clamp", + "llvm.nvvm.sust.b.2d.array.v2i8.trap" => "__nvvm_sust_b_2d_array_v2i8_trap", + "llvm.nvvm.sust.b.2d.array.v2i8.zero" => "__nvvm_sust_b_2d_array_v2i8_zero", + "llvm.nvvm.sust.b.2d.array.v4i16.clamp" => "__nvvm_sust_b_2d_array_v4i16_clamp", + "llvm.nvvm.sust.b.2d.array.v4i16.trap" => "__nvvm_sust_b_2d_array_v4i16_trap", + "llvm.nvvm.sust.b.2d.array.v4i16.zero" => "__nvvm_sust_b_2d_array_v4i16_zero", + "llvm.nvvm.sust.b.2d.array.v4i32.clamp" => "__nvvm_sust_b_2d_array_v4i32_clamp", + "llvm.nvvm.sust.b.2d.array.v4i32.trap" => "__nvvm_sust_b_2d_array_v4i32_trap", + "llvm.nvvm.sust.b.2d.array.v4i32.zero" => "__nvvm_sust_b_2d_array_v4i32_zero", + "llvm.nvvm.sust.b.2d.array.v4i8.clamp" => "__nvvm_sust_b_2d_array_v4i8_clamp", + "llvm.nvvm.sust.b.2d.array.v4i8.trap" => "__nvvm_sust_b_2d_array_v4i8_trap", + "llvm.nvvm.sust.b.2d.array.v4i8.zero" => "__nvvm_sust_b_2d_array_v4i8_zero", + "llvm.nvvm.sust.b.2d.i16.clamp" => "__nvvm_sust_b_2d_i16_clamp", + "llvm.nvvm.sust.b.2d.i16.trap" => "__nvvm_sust_b_2d_i16_trap", + "llvm.nvvm.sust.b.2d.i16.zero" => "__nvvm_sust_b_2d_i16_zero", + "llvm.nvvm.sust.b.2d.i32.clamp" => "__nvvm_sust_b_2d_i32_clamp", + "llvm.nvvm.sust.b.2d.i32.trap" => "__nvvm_sust_b_2d_i32_trap", + "llvm.nvvm.sust.b.2d.i32.zero" => "__nvvm_sust_b_2d_i32_zero", + "llvm.nvvm.sust.b.2d.i64.clamp" => "__nvvm_sust_b_2d_i64_clamp", + "llvm.nvvm.sust.b.2d.i64.trap" => "__nvvm_sust_b_2d_i64_trap", + "llvm.nvvm.sust.b.2d.i64.zero" => "__nvvm_sust_b_2d_i64_zero", + "llvm.nvvm.sust.b.2d.i8.clamp" => "__nvvm_sust_b_2d_i8_clamp", + "llvm.nvvm.sust.b.2d.i8.trap" => "__nvvm_sust_b_2d_i8_trap", + "llvm.nvvm.sust.b.2d.i8.zero" => "__nvvm_sust_b_2d_i8_zero", + "llvm.nvvm.sust.b.2d.v2i16.clamp" => "__nvvm_sust_b_2d_v2i16_clamp", + "llvm.nvvm.sust.b.2d.v2i16.trap" => "__nvvm_sust_b_2d_v2i16_trap", + "llvm.nvvm.sust.b.2d.v2i16.zero" => "__nvvm_sust_b_2d_v2i16_zero", + "llvm.nvvm.sust.b.2d.v2i32.clamp" => 
"__nvvm_sust_b_2d_v2i32_clamp", + "llvm.nvvm.sust.b.2d.v2i32.trap" => "__nvvm_sust_b_2d_v2i32_trap", + "llvm.nvvm.sust.b.2d.v2i32.zero" => "__nvvm_sust_b_2d_v2i32_zero", + "llvm.nvvm.sust.b.2d.v2i64.clamp" => "__nvvm_sust_b_2d_v2i64_clamp", + "llvm.nvvm.sust.b.2d.v2i64.trap" => "__nvvm_sust_b_2d_v2i64_trap", + "llvm.nvvm.sust.b.2d.v2i64.zero" => "__nvvm_sust_b_2d_v2i64_zero", + "llvm.nvvm.sust.b.2d.v2i8.clamp" => "__nvvm_sust_b_2d_v2i8_clamp", + "llvm.nvvm.sust.b.2d.v2i8.trap" => "__nvvm_sust_b_2d_v2i8_trap", + "llvm.nvvm.sust.b.2d.v2i8.zero" => "__nvvm_sust_b_2d_v2i8_zero", + "llvm.nvvm.sust.b.2d.v4i16.clamp" => "__nvvm_sust_b_2d_v4i16_clamp", + "llvm.nvvm.sust.b.2d.v4i16.trap" => "__nvvm_sust_b_2d_v4i16_trap", + "llvm.nvvm.sust.b.2d.v4i16.zero" => "__nvvm_sust_b_2d_v4i16_zero", + "llvm.nvvm.sust.b.2d.v4i32.clamp" => "__nvvm_sust_b_2d_v4i32_clamp", + "llvm.nvvm.sust.b.2d.v4i32.trap" => "__nvvm_sust_b_2d_v4i32_trap", + "llvm.nvvm.sust.b.2d.v4i32.zero" => "__nvvm_sust_b_2d_v4i32_zero", + "llvm.nvvm.sust.b.2d.v4i8.clamp" => "__nvvm_sust_b_2d_v4i8_clamp", + "llvm.nvvm.sust.b.2d.v4i8.trap" => "__nvvm_sust_b_2d_v4i8_trap", + "llvm.nvvm.sust.b.2d.v4i8.zero" => "__nvvm_sust_b_2d_v4i8_zero", + "llvm.nvvm.sust.b.3d.i16.clamp" => "__nvvm_sust_b_3d_i16_clamp", + "llvm.nvvm.sust.b.3d.i16.trap" => "__nvvm_sust_b_3d_i16_trap", + "llvm.nvvm.sust.b.3d.i16.zero" => "__nvvm_sust_b_3d_i16_zero", + "llvm.nvvm.sust.b.3d.i32.clamp" => "__nvvm_sust_b_3d_i32_clamp", + "llvm.nvvm.sust.b.3d.i32.trap" => "__nvvm_sust_b_3d_i32_trap", + "llvm.nvvm.sust.b.3d.i32.zero" => "__nvvm_sust_b_3d_i32_zero", + "llvm.nvvm.sust.b.3d.i64.clamp" => "__nvvm_sust_b_3d_i64_clamp", + "llvm.nvvm.sust.b.3d.i64.trap" => "__nvvm_sust_b_3d_i64_trap", + "llvm.nvvm.sust.b.3d.i64.zero" => "__nvvm_sust_b_3d_i64_zero", + "llvm.nvvm.sust.b.3d.i8.clamp" => "__nvvm_sust_b_3d_i8_clamp", + "llvm.nvvm.sust.b.3d.i8.trap" => "__nvvm_sust_b_3d_i8_trap", + "llvm.nvvm.sust.b.3d.i8.zero" => "__nvvm_sust_b_3d_i8_zero", + "llvm.nvvm.sust.b.3d.v2i16.clamp" => "__nvvm_sust_b_3d_v2i16_clamp", + "llvm.nvvm.sust.b.3d.v2i16.trap" => "__nvvm_sust_b_3d_v2i16_trap", + "llvm.nvvm.sust.b.3d.v2i16.zero" => "__nvvm_sust_b_3d_v2i16_zero", + "llvm.nvvm.sust.b.3d.v2i32.clamp" => "__nvvm_sust_b_3d_v2i32_clamp", + "llvm.nvvm.sust.b.3d.v2i32.trap" => "__nvvm_sust_b_3d_v2i32_trap", + "llvm.nvvm.sust.b.3d.v2i32.zero" => "__nvvm_sust_b_3d_v2i32_zero", + "llvm.nvvm.sust.b.3d.v2i64.clamp" => "__nvvm_sust_b_3d_v2i64_clamp", + "llvm.nvvm.sust.b.3d.v2i64.trap" => "__nvvm_sust_b_3d_v2i64_trap", + "llvm.nvvm.sust.b.3d.v2i64.zero" => "__nvvm_sust_b_3d_v2i64_zero", + "llvm.nvvm.sust.b.3d.v2i8.clamp" => "__nvvm_sust_b_3d_v2i8_clamp", + "llvm.nvvm.sust.b.3d.v2i8.trap" => "__nvvm_sust_b_3d_v2i8_trap", + "llvm.nvvm.sust.b.3d.v2i8.zero" => "__nvvm_sust_b_3d_v2i8_zero", + "llvm.nvvm.sust.b.3d.v4i16.clamp" => "__nvvm_sust_b_3d_v4i16_clamp", + "llvm.nvvm.sust.b.3d.v4i16.trap" => "__nvvm_sust_b_3d_v4i16_trap", + "llvm.nvvm.sust.b.3d.v4i16.zero" => "__nvvm_sust_b_3d_v4i16_zero", + "llvm.nvvm.sust.b.3d.v4i32.clamp" => "__nvvm_sust_b_3d_v4i32_clamp", + "llvm.nvvm.sust.b.3d.v4i32.trap" => "__nvvm_sust_b_3d_v4i32_trap", + "llvm.nvvm.sust.b.3d.v4i32.zero" => "__nvvm_sust_b_3d_v4i32_zero", + "llvm.nvvm.sust.b.3d.v4i8.clamp" => "__nvvm_sust_b_3d_v4i8_clamp", + "llvm.nvvm.sust.b.3d.v4i8.trap" => "__nvvm_sust_b_3d_v4i8_trap", + "llvm.nvvm.sust.b.3d.v4i8.zero" => "__nvvm_sust_b_3d_v4i8_zero", + "llvm.nvvm.sust.p.1d.array.i16.trap" => "__nvvm_sust_p_1d_array_i16_trap", + "llvm.nvvm.sust.p.1d.array.i32.trap" => 
"__nvvm_sust_p_1d_array_i32_trap", + "llvm.nvvm.sust.p.1d.array.i8.trap" => "__nvvm_sust_p_1d_array_i8_trap", + "llvm.nvvm.sust.p.1d.array.v2i16.trap" => "__nvvm_sust_p_1d_array_v2i16_trap", + "llvm.nvvm.sust.p.1d.array.v2i32.trap" => "__nvvm_sust_p_1d_array_v2i32_trap", + "llvm.nvvm.sust.p.1d.array.v2i8.trap" => "__nvvm_sust_p_1d_array_v2i8_trap", + "llvm.nvvm.sust.p.1d.array.v4i16.trap" => "__nvvm_sust_p_1d_array_v4i16_trap", + "llvm.nvvm.sust.p.1d.array.v4i32.trap" => "__nvvm_sust_p_1d_array_v4i32_trap", + "llvm.nvvm.sust.p.1d.array.v4i8.trap" => "__nvvm_sust_p_1d_array_v4i8_trap", + "llvm.nvvm.sust.p.1d.i16.trap" => "__nvvm_sust_p_1d_i16_trap", + "llvm.nvvm.sust.p.1d.i32.trap" => "__nvvm_sust_p_1d_i32_trap", + "llvm.nvvm.sust.p.1d.i8.trap" => "__nvvm_sust_p_1d_i8_trap", + "llvm.nvvm.sust.p.1d.v2i16.trap" => "__nvvm_sust_p_1d_v2i16_trap", + "llvm.nvvm.sust.p.1d.v2i32.trap" => "__nvvm_sust_p_1d_v2i32_trap", + "llvm.nvvm.sust.p.1d.v2i8.trap" => "__nvvm_sust_p_1d_v2i8_trap", + "llvm.nvvm.sust.p.1d.v4i16.trap" => "__nvvm_sust_p_1d_v4i16_trap", + "llvm.nvvm.sust.p.1d.v4i32.trap" => "__nvvm_sust_p_1d_v4i32_trap", + "llvm.nvvm.sust.p.1d.v4i8.trap" => "__nvvm_sust_p_1d_v4i8_trap", + "llvm.nvvm.sust.p.2d.array.i16.trap" => "__nvvm_sust_p_2d_array_i16_trap", + "llvm.nvvm.sust.p.2d.array.i32.trap" => "__nvvm_sust_p_2d_array_i32_trap", + "llvm.nvvm.sust.p.2d.array.i8.trap" => "__nvvm_sust_p_2d_array_i8_trap", + "llvm.nvvm.sust.p.2d.array.v2i16.trap" => "__nvvm_sust_p_2d_array_v2i16_trap", + "llvm.nvvm.sust.p.2d.array.v2i32.trap" => "__nvvm_sust_p_2d_array_v2i32_trap", + "llvm.nvvm.sust.p.2d.array.v2i8.trap" => "__nvvm_sust_p_2d_array_v2i8_trap", + "llvm.nvvm.sust.p.2d.array.v4i16.trap" => "__nvvm_sust_p_2d_array_v4i16_trap", + "llvm.nvvm.sust.p.2d.array.v4i32.trap" => "__nvvm_sust_p_2d_array_v4i32_trap", + "llvm.nvvm.sust.p.2d.array.v4i8.trap" => "__nvvm_sust_p_2d_array_v4i8_trap", + "llvm.nvvm.sust.p.2d.i16.trap" => "__nvvm_sust_p_2d_i16_trap", + "llvm.nvvm.sust.p.2d.i32.trap" => "__nvvm_sust_p_2d_i32_trap", + "llvm.nvvm.sust.p.2d.i8.trap" => "__nvvm_sust_p_2d_i8_trap", + "llvm.nvvm.sust.p.2d.v2i16.trap" => "__nvvm_sust_p_2d_v2i16_trap", + "llvm.nvvm.sust.p.2d.v2i32.trap" => "__nvvm_sust_p_2d_v2i32_trap", + "llvm.nvvm.sust.p.2d.v2i8.trap" => "__nvvm_sust_p_2d_v2i8_trap", + "llvm.nvvm.sust.p.2d.v4i16.trap" => "__nvvm_sust_p_2d_v4i16_trap", + "llvm.nvvm.sust.p.2d.v4i32.trap" => "__nvvm_sust_p_2d_v4i32_trap", + "llvm.nvvm.sust.p.2d.v4i8.trap" => "__nvvm_sust_p_2d_v4i8_trap", + "llvm.nvvm.sust.p.3d.i16.trap" => "__nvvm_sust_p_3d_i16_trap", + "llvm.nvvm.sust.p.3d.i32.trap" => "__nvvm_sust_p_3d_i32_trap", + "llvm.nvvm.sust.p.3d.i8.trap" => "__nvvm_sust_p_3d_i8_trap", + "llvm.nvvm.sust.p.3d.v2i16.trap" => "__nvvm_sust_p_3d_v2i16_trap", + "llvm.nvvm.sust.p.3d.v2i32.trap" => "__nvvm_sust_p_3d_v2i32_trap", + "llvm.nvvm.sust.p.3d.v2i8.trap" => "__nvvm_sust_p_3d_v2i8_trap", + "llvm.nvvm.sust.p.3d.v4i16.trap" => "__nvvm_sust_p_3d_v4i16_trap", + "llvm.nvvm.sust.p.3d.v4i32.trap" => "__nvvm_sust_p_3d_v4i32_trap", + "llvm.nvvm.sust.p.3d.v4i8.trap" => "__nvvm_sust_p_3d_v4i8_trap", + "llvm.nvvm.swap.lo.hi.b64" => "__nvvm_swap_lo_hi_b64", + "llvm.nvvm.trunc.d" => "__nvvm_trunc_d", + "llvm.nvvm.trunc.f" => "__nvvm_trunc_f", + "llvm.nvvm.trunc.ftz.f" => "__nvvm_trunc_ftz_f", + "llvm.nvvm.txq.array.size" => "__nvvm_txq_array_size", + "llvm.nvvm.txq.channel.data.type" => "__nvvm_txq_channel_data_type", + "llvm.nvvm.txq.channel.order" => "__nvvm_txq_channel_order", + "llvm.nvvm.txq.depth" => "__nvvm_txq_depth", + 
"llvm.nvvm.txq.height" => "__nvvm_txq_height", + "llvm.nvvm.txq.num.mipmap.levels" => "__nvvm_txq_num_mipmap_levels", + "llvm.nvvm.txq.num.samples" => "__nvvm_txq_num_samples", + "llvm.nvvm.txq.width" => "__nvvm_txq_width", + "llvm.nvvm.ui2d.rm" => "__nvvm_ui2d_rm", + "llvm.nvvm.ui2d.rn" => "__nvvm_ui2d_rn", + "llvm.nvvm.ui2d.rp" => "__nvvm_ui2d_rp", + "llvm.nvvm.ui2d.rz" => "__nvvm_ui2d_rz", + "llvm.nvvm.ui2f.rm" => "__nvvm_ui2f_rm", + "llvm.nvvm.ui2f.rn" => "__nvvm_ui2f_rn", + "llvm.nvvm.ui2f.rp" => "__nvvm_ui2f_rp", + "llvm.nvvm.ui2f.rz" => "__nvvm_ui2f_rz", + "llvm.nvvm.ull2d.rm" => "__nvvm_ull2d_rm", + "llvm.nvvm.ull2d.rn" => "__nvvm_ull2d_rn", + "llvm.nvvm.ull2d.rp" => "__nvvm_ull2d_rp", + "llvm.nvvm.ull2d.rz" => "__nvvm_ull2d_rz", + "llvm.nvvm.ull2f.rm" => "__nvvm_ull2f_rm", + "llvm.nvvm.ull2f.rn" => "__nvvm_ull2f_rn", + "llvm.nvvm.ull2f.rp" => "__nvvm_ull2f_rp", + "llvm.nvvm.ull2f.rz" => "__nvvm_ull2f_rz", + "llvm.nvvm.vote.all" => "__nvvm_vote_all", + "llvm.nvvm.vote.all.sync" => "__nvvm_vote_all_sync", + "llvm.nvvm.vote.any" => "__nvvm_vote_any", + "llvm.nvvm.vote.any.sync" => "__nvvm_vote_any_sync", + "llvm.nvvm.vote.ballot" => "__nvvm_vote_ballot", + "llvm.nvvm.vote.ballot.sync" => "__nvvm_vote_ballot_sync", + "llvm.nvvm.vote.uni" => "__nvvm_vote_uni", + "llvm.nvvm.vote.uni.sync" => "__nvvm_vote_uni_sync", + // ppc + "llvm.ppc.addex" => "__builtin_ppc_addex", + "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd", + "llvm.ppc.altivec.crypto.vcipher" => "__builtin_altivec_crypto_vcipher", + "llvm.ppc.altivec.crypto.vcipherlast" => "__builtin_altivec_crypto_vcipherlast", + "llvm.ppc.altivec.crypto.vncipher" => "__builtin_altivec_crypto_vncipher", + "llvm.ppc.altivec.crypto.vncipherlast" => "__builtin_altivec_crypto_vncipherlast", + "llvm.ppc.altivec.crypto.vpermxor" => "__builtin_altivec_crypto_vpermxor", + "llvm.ppc.altivec.crypto.vpermxor.be" => "__builtin_altivec_crypto_vpermxor_be", + "llvm.ppc.altivec.crypto.vpmsumb" => "__builtin_altivec_crypto_vpmsumb", + "llvm.ppc.altivec.crypto.vpmsumd" => "__builtin_altivec_crypto_vpmsumd", + "llvm.ppc.altivec.crypto.vpmsumh" => "__builtin_altivec_crypto_vpmsumh", + "llvm.ppc.altivec.crypto.vpmsumw" => "__builtin_altivec_crypto_vpmsumw", + "llvm.ppc.altivec.crypto.vsbox" => "__builtin_altivec_crypto_vsbox", + "llvm.ppc.altivec.crypto.vshasigmad" => "__builtin_altivec_crypto_vshasigmad", + "llvm.ppc.altivec.crypto.vshasigmaw" => "__builtin_altivec_crypto_vshasigmaw", + "llvm.ppc.altivec.dss" => "__builtin_altivec_dss", + "llvm.ppc.altivec.dssall" => "__builtin_altivec_dssall", + "llvm.ppc.altivec.dst" => "__builtin_altivec_dst", + "llvm.ppc.altivec.dstst" => "__builtin_altivec_dstst", + "llvm.ppc.altivec.dststt" => "__builtin_altivec_dststt", + "llvm.ppc.altivec.dstt" => "__builtin_altivec_dstt", + "llvm.ppc.altivec.mfvscr" => "__builtin_altivec_mfvscr", + "llvm.ppc.altivec.mtvscr" => "__builtin_altivec_mtvscr", + "llvm.ppc.altivec.mtvsrbm" => "__builtin_altivec_mtvsrbm", + "llvm.ppc.altivec.mtvsrdm" => "__builtin_altivec_mtvsrdm", + "llvm.ppc.altivec.mtvsrhm" => "__builtin_altivec_mtvsrhm", + "llvm.ppc.altivec.mtvsrqm" => "__builtin_altivec_mtvsrqm", + "llvm.ppc.altivec.mtvsrwm" => "__builtin_altivec_mtvsrwm", + "llvm.ppc.altivec.vabsdub" => "__builtin_altivec_vabsdub", + "llvm.ppc.altivec.vabsduh" => "__builtin_altivec_vabsduh", + "llvm.ppc.altivec.vabsduw" => "__builtin_altivec_vabsduw", + "llvm.ppc.altivec.vaddcuq" => "__builtin_altivec_vaddcuq", + "llvm.ppc.altivec.vaddcuw" => "__builtin_altivec_vaddcuw", + 
"llvm.ppc.altivec.vaddecuq" => "__builtin_altivec_vaddecuq", + "llvm.ppc.altivec.vaddeuqm" => "__builtin_altivec_vaddeuqm", + "llvm.ppc.altivec.vaddsbs" => "__builtin_altivec_vaddsbs", + "llvm.ppc.altivec.vaddshs" => "__builtin_altivec_vaddshs", + "llvm.ppc.altivec.vaddsws" => "__builtin_altivec_vaddsws", + "llvm.ppc.altivec.vaddubs" => "__builtin_altivec_vaddubs", + "llvm.ppc.altivec.vadduhs" => "__builtin_altivec_vadduhs", + "llvm.ppc.altivec.vadduws" => "__builtin_altivec_vadduws", + "llvm.ppc.altivec.vavgsb" => "__builtin_altivec_vavgsb", + "llvm.ppc.altivec.vavgsh" => "__builtin_altivec_vavgsh", + "llvm.ppc.altivec.vavgsw" => "__builtin_altivec_vavgsw", + "llvm.ppc.altivec.vavgub" => "__builtin_altivec_vavgub", + "llvm.ppc.altivec.vavguh" => "__builtin_altivec_vavguh", + "llvm.ppc.altivec.vavguw" => "__builtin_altivec_vavguw", + "llvm.ppc.altivec.vbpermd" => "__builtin_altivec_vbpermd", + "llvm.ppc.altivec.vbpermq" => "__builtin_altivec_vbpermq", + "llvm.ppc.altivec.vcfsx" => "__builtin_altivec_vcfsx", + "llvm.ppc.altivec.vcfuged" => "__builtin_altivec_vcfuged", + "llvm.ppc.altivec.vcfux" => "__builtin_altivec_vcfux", + "llvm.ppc.altivec.vclrlb" => "__builtin_altivec_vclrlb", + "llvm.ppc.altivec.vclrrb" => "__builtin_altivec_vclrrb", + "llvm.ppc.altivec.vclzdm" => "__builtin_altivec_vclzdm", + "llvm.ppc.altivec.vclzlsbb" => "__builtin_altivec_vclzlsbb", + "llvm.ppc.altivec.vcmpbfp" => "__builtin_altivec_vcmpbfp", + "llvm.ppc.altivec.vcmpbfp.p" => "__builtin_altivec_vcmpbfp_p", + "llvm.ppc.altivec.vcmpeqfp" => "__builtin_altivec_vcmpeqfp", + "llvm.ppc.altivec.vcmpeqfp.p" => "__builtin_altivec_vcmpeqfp_p", + "llvm.ppc.altivec.vcmpequb" => "__builtin_altivec_vcmpequb", + "llvm.ppc.altivec.vcmpequb.p" => "__builtin_altivec_vcmpequb_p", + "llvm.ppc.altivec.vcmpequd" => "__builtin_altivec_vcmpequd", + "llvm.ppc.altivec.vcmpequd.p" => "__builtin_altivec_vcmpequd_p", + "llvm.ppc.altivec.vcmpequh" => "__builtin_altivec_vcmpequh", + "llvm.ppc.altivec.vcmpequh.p" => "__builtin_altivec_vcmpequh_p", + "llvm.ppc.altivec.vcmpequq" => "__builtin_altivec_vcmpequq", + "llvm.ppc.altivec.vcmpequq.p" => "__builtin_altivec_vcmpequq_p", + "llvm.ppc.altivec.vcmpequw" => "__builtin_altivec_vcmpequw", + "llvm.ppc.altivec.vcmpequw.p" => "__builtin_altivec_vcmpequw_p", + "llvm.ppc.altivec.vcmpgefp" => "__builtin_altivec_vcmpgefp", + "llvm.ppc.altivec.vcmpgefp.p" => "__builtin_altivec_vcmpgefp_p", + "llvm.ppc.altivec.vcmpgtfp" => "__builtin_altivec_vcmpgtfp", + "llvm.ppc.altivec.vcmpgtfp.p" => "__builtin_altivec_vcmpgtfp_p", + "llvm.ppc.altivec.vcmpgtsb" => "__builtin_altivec_vcmpgtsb", + "llvm.ppc.altivec.vcmpgtsb.p" => "__builtin_altivec_vcmpgtsb_p", + "llvm.ppc.altivec.vcmpgtsd" => "__builtin_altivec_vcmpgtsd", + "llvm.ppc.altivec.vcmpgtsd.p" => "__builtin_altivec_vcmpgtsd_p", + "llvm.ppc.altivec.vcmpgtsh" => "__builtin_altivec_vcmpgtsh", + "llvm.ppc.altivec.vcmpgtsh.p" => "__builtin_altivec_vcmpgtsh_p", + "llvm.ppc.altivec.vcmpgtsq" => "__builtin_altivec_vcmpgtsq", + "llvm.ppc.altivec.vcmpgtsq.p" => "__builtin_altivec_vcmpgtsq_p", + "llvm.ppc.altivec.vcmpgtsw" => "__builtin_altivec_vcmpgtsw", + "llvm.ppc.altivec.vcmpgtsw.p" => "__builtin_altivec_vcmpgtsw_p", + "llvm.ppc.altivec.vcmpgtub" => "__builtin_altivec_vcmpgtub", + "llvm.ppc.altivec.vcmpgtub.p" => "__builtin_altivec_vcmpgtub_p", + "llvm.ppc.altivec.vcmpgtud" => "__builtin_altivec_vcmpgtud", + "llvm.ppc.altivec.vcmpgtud.p" => "__builtin_altivec_vcmpgtud_p", + "llvm.ppc.altivec.vcmpgtuh" => "__builtin_altivec_vcmpgtuh", + "llvm.ppc.altivec.vcmpgtuh.p" => 
"__builtin_altivec_vcmpgtuh_p", + "llvm.ppc.altivec.vcmpgtuq" => "__builtin_altivec_vcmpgtuq", + "llvm.ppc.altivec.vcmpgtuq.p" => "__builtin_altivec_vcmpgtuq_p", + "llvm.ppc.altivec.vcmpgtuw" => "__builtin_altivec_vcmpgtuw", + "llvm.ppc.altivec.vcmpgtuw.p" => "__builtin_altivec_vcmpgtuw_p", + "llvm.ppc.altivec.vcmpneb" => "__builtin_altivec_vcmpneb", + "llvm.ppc.altivec.vcmpneb.p" => "__builtin_altivec_vcmpneb_p", + "llvm.ppc.altivec.vcmpneh" => "__builtin_altivec_vcmpneh", + "llvm.ppc.altivec.vcmpneh.p" => "__builtin_altivec_vcmpneh_p", + "llvm.ppc.altivec.vcmpnew" => "__builtin_altivec_vcmpnew", + "llvm.ppc.altivec.vcmpnew.p" => "__builtin_altivec_vcmpnew_p", + "llvm.ppc.altivec.vcmpnezb" => "__builtin_altivec_vcmpnezb", + "llvm.ppc.altivec.vcmpnezb.p" => "__builtin_altivec_vcmpnezb_p", + "llvm.ppc.altivec.vcmpnezh" => "__builtin_altivec_vcmpnezh", + "llvm.ppc.altivec.vcmpnezh.p" => "__builtin_altivec_vcmpnezh_p", + "llvm.ppc.altivec.vcmpnezw" => "__builtin_altivec_vcmpnezw", + "llvm.ppc.altivec.vcmpnezw.p" => "__builtin_altivec_vcmpnezw_p", + "llvm.ppc.altivec.vcntmbb" => "__builtin_altivec_vcntmbb", + "llvm.ppc.altivec.vcntmbd" => "__builtin_altivec_vcntmbd", + "llvm.ppc.altivec.vcntmbh" => "__builtin_altivec_vcntmbh", + "llvm.ppc.altivec.vcntmbw" => "__builtin_altivec_vcntmbw", + "llvm.ppc.altivec.vctsxs" => "__builtin_altivec_vctsxs", + "llvm.ppc.altivec.vctuxs" => "__builtin_altivec_vctuxs", + "llvm.ppc.altivec.vctzdm" => "__builtin_altivec_vctzdm", + "llvm.ppc.altivec.vctzlsbb" => "__builtin_altivec_vctzlsbb", + "llvm.ppc.altivec.vdivesd" => "__builtin_altivec_vdivesd", + "llvm.ppc.altivec.vdivesq" => "__builtin_altivec_vdivesq", + "llvm.ppc.altivec.vdivesw" => "__builtin_altivec_vdivesw", + "llvm.ppc.altivec.vdiveud" => "__builtin_altivec_vdiveud", + "llvm.ppc.altivec.vdiveuq" => "__builtin_altivec_vdiveuq", + "llvm.ppc.altivec.vdiveuw" => "__builtin_altivec_vdiveuw", + "llvm.ppc.altivec.vexpandbm" => "__builtin_altivec_vexpandbm", + "llvm.ppc.altivec.vexpanddm" => "__builtin_altivec_vexpanddm", + "llvm.ppc.altivec.vexpandhm" => "__builtin_altivec_vexpandhm", + "llvm.ppc.altivec.vexpandqm" => "__builtin_altivec_vexpandqm", + "llvm.ppc.altivec.vexpandwm" => "__builtin_altivec_vexpandwm", + "llvm.ppc.altivec.vexptefp" => "__builtin_altivec_vexptefp", + "llvm.ppc.altivec.vextddvlx" => "__builtin_altivec_vextddvlx", + "llvm.ppc.altivec.vextddvrx" => "__builtin_altivec_vextddvrx", + "llvm.ppc.altivec.vextdubvlx" => "__builtin_altivec_vextdubvlx", + "llvm.ppc.altivec.vextdubvrx" => "__builtin_altivec_vextdubvrx", + "llvm.ppc.altivec.vextduhvlx" => "__builtin_altivec_vextduhvlx", + "llvm.ppc.altivec.vextduhvrx" => "__builtin_altivec_vextduhvrx", + "llvm.ppc.altivec.vextduwvlx" => "__builtin_altivec_vextduwvlx", + "llvm.ppc.altivec.vextduwvrx" => "__builtin_altivec_vextduwvrx", + "llvm.ppc.altivec.vextractbm" => "__builtin_altivec_vextractbm", + "llvm.ppc.altivec.vextractdm" => "__builtin_altivec_vextractdm", + "llvm.ppc.altivec.vextracthm" => "__builtin_altivec_vextracthm", + "llvm.ppc.altivec.vextractqm" => "__builtin_altivec_vextractqm", + "llvm.ppc.altivec.vextractwm" => "__builtin_altivec_vextractwm", + "llvm.ppc.altivec.vextsb2d" => "__builtin_altivec_vextsb2d", + "llvm.ppc.altivec.vextsb2w" => "__builtin_altivec_vextsb2w", + "llvm.ppc.altivec.vextsd2q" => "__builtin_altivec_vextsd2q", + "llvm.ppc.altivec.vextsh2d" => "__builtin_altivec_vextsh2d", + "llvm.ppc.altivec.vextsh2w" => "__builtin_altivec_vextsh2w", + "llvm.ppc.altivec.vextsw2d" => "__builtin_altivec_vextsw2d", + 
"llvm.ppc.altivec.vgbbd" => "__builtin_altivec_vgbbd", + "llvm.ppc.altivec.vgnb" => "__builtin_altivec_vgnb", + "llvm.ppc.altivec.vinsblx" => "__builtin_altivec_vinsblx", + "llvm.ppc.altivec.vinsbrx" => "__builtin_altivec_vinsbrx", + "llvm.ppc.altivec.vinsbvlx" => "__builtin_altivec_vinsbvlx", + "llvm.ppc.altivec.vinsbvrx" => "__builtin_altivec_vinsbvrx", + "llvm.ppc.altivec.vinsdlx" => "__builtin_altivec_vinsdlx", + "llvm.ppc.altivec.vinsdrx" => "__builtin_altivec_vinsdrx", + "llvm.ppc.altivec.vinshlx" => "__builtin_altivec_vinshlx", + "llvm.ppc.altivec.vinshrx" => "__builtin_altivec_vinshrx", + "llvm.ppc.altivec.vinshvlx" => "__builtin_altivec_vinshvlx", + "llvm.ppc.altivec.vinshvrx" => "__builtin_altivec_vinshvrx", + "llvm.ppc.altivec.vinswlx" => "__builtin_altivec_vinswlx", + "llvm.ppc.altivec.vinswrx" => "__builtin_altivec_vinswrx", + "llvm.ppc.altivec.vinswvlx" => "__builtin_altivec_vinswvlx", + "llvm.ppc.altivec.vinswvrx" => "__builtin_altivec_vinswvrx", + "llvm.ppc.altivec.vlogefp" => "__builtin_altivec_vlogefp", + "llvm.ppc.altivec.vmaddfp" => "__builtin_altivec_vmaddfp", + "llvm.ppc.altivec.vmaxfp" => "__builtin_altivec_vmaxfp", + "llvm.ppc.altivec.vmaxsb" => "__builtin_altivec_vmaxsb", + "llvm.ppc.altivec.vmaxsd" => "__builtin_altivec_vmaxsd", + "llvm.ppc.altivec.vmaxsh" => "__builtin_altivec_vmaxsh", + "llvm.ppc.altivec.vmaxsw" => "__builtin_altivec_vmaxsw", + "llvm.ppc.altivec.vmaxub" => "__builtin_altivec_vmaxub", + "llvm.ppc.altivec.vmaxud" => "__builtin_altivec_vmaxud", + "llvm.ppc.altivec.vmaxuh" => "__builtin_altivec_vmaxuh", + "llvm.ppc.altivec.vmaxuw" => "__builtin_altivec_vmaxuw", + "llvm.ppc.altivec.vmhaddshs" => "__builtin_altivec_vmhaddshs", + "llvm.ppc.altivec.vmhraddshs" => "__builtin_altivec_vmhraddshs", + "llvm.ppc.altivec.vminfp" => "__builtin_altivec_vminfp", + "llvm.ppc.altivec.vminsb" => "__builtin_altivec_vminsb", + "llvm.ppc.altivec.vminsd" => "__builtin_altivec_vminsd", + "llvm.ppc.altivec.vminsh" => "__builtin_altivec_vminsh", + "llvm.ppc.altivec.vminsw" => "__builtin_altivec_vminsw", + "llvm.ppc.altivec.vminub" => "__builtin_altivec_vminub", + "llvm.ppc.altivec.vminud" => "__builtin_altivec_vminud", + "llvm.ppc.altivec.vminuh" => "__builtin_altivec_vminuh", + "llvm.ppc.altivec.vminuw" => "__builtin_altivec_vminuw", + "llvm.ppc.altivec.vmladduhm" => "__builtin_altivec_vmladduhm", + "llvm.ppc.altivec.vmsumcud" => "__builtin_altivec_vmsumcud", + "llvm.ppc.altivec.vmsummbm" => "__builtin_altivec_vmsummbm", + "llvm.ppc.altivec.vmsumshm" => "__builtin_altivec_vmsumshm", + "llvm.ppc.altivec.vmsumshs" => "__builtin_altivec_vmsumshs", + "llvm.ppc.altivec.vmsumubm" => "__builtin_altivec_vmsumubm", + "llvm.ppc.altivec.vmsumudm" => "__builtin_altivec_vmsumudm", + "llvm.ppc.altivec.vmsumuhm" => "__builtin_altivec_vmsumuhm", + "llvm.ppc.altivec.vmsumuhs" => "__builtin_altivec_vmsumuhs", + "llvm.ppc.altivec.vmulesb" => "__builtin_altivec_vmulesb", + "llvm.ppc.altivec.vmulesd" => "__builtin_altivec_vmulesd", + "llvm.ppc.altivec.vmulesh" => "__builtin_altivec_vmulesh", + "llvm.ppc.altivec.vmulesw" => "__builtin_altivec_vmulesw", + "llvm.ppc.altivec.vmuleub" => "__builtin_altivec_vmuleub", + "llvm.ppc.altivec.vmuleud" => "__builtin_altivec_vmuleud", + "llvm.ppc.altivec.vmuleuh" => "__builtin_altivec_vmuleuh", + "llvm.ppc.altivec.vmuleuw" => "__builtin_altivec_vmuleuw", + "llvm.ppc.altivec.vmulhsd" => "__builtin_altivec_vmulhsd", + "llvm.ppc.altivec.vmulhsw" => "__builtin_altivec_vmulhsw", + "llvm.ppc.altivec.vmulhud" => "__builtin_altivec_vmulhud", + 
"llvm.ppc.altivec.vmulhuw" => "__builtin_altivec_vmulhuw", + "llvm.ppc.altivec.vmulosb" => "__builtin_altivec_vmulosb", + "llvm.ppc.altivec.vmulosd" => "__builtin_altivec_vmulosd", + "llvm.ppc.altivec.vmulosh" => "__builtin_altivec_vmulosh", + "llvm.ppc.altivec.vmulosw" => "__builtin_altivec_vmulosw", + "llvm.ppc.altivec.vmuloub" => "__builtin_altivec_vmuloub", + "llvm.ppc.altivec.vmuloud" => "__builtin_altivec_vmuloud", + "llvm.ppc.altivec.vmulouh" => "__builtin_altivec_vmulouh", + "llvm.ppc.altivec.vmulouw" => "__builtin_altivec_vmulouw", + "llvm.ppc.altivec.vnmsubfp" => "__builtin_altivec_vnmsubfp", + "llvm.ppc.altivec.vpdepd" => "__builtin_altivec_vpdepd", + "llvm.ppc.altivec.vperm" => "__builtin_altivec_vperm_4si", + "llvm.ppc.altivec.vpextd" => "__builtin_altivec_vpextd", + "llvm.ppc.altivec.vpkpx" => "__builtin_altivec_vpkpx", + "llvm.ppc.altivec.vpksdss" => "__builtin_altivec_vpksdss", + "llvm.ppc.altivec.vpksdus" => "__builtin_altivec_vpksdus", + "llvm.ppc.altivec.vpkshss" => "__builtin_altivec_vpkshss", + "llvm.ppc.altivec.vpkshus" => "__builtin_altivec_vpkshus", + "llvm.ppc.altivec.vpkswss" => "__builtin_altivec_vpkswss", + "llvm.ppc.altivec.vpkswus" => "__builtin_altivec_vpkswus", + "llvm.ppc.altivec.vpkudus" => "__builtin_altivec_vpkudus", + "llvm.ppc.altivec.vpkuhus" => "__builtin_altivec_vpkuhus", + "llvm.ppc.altivec.vpkuwus" => "__builtin_altivec_vpkuwus", + "llvm.ppc.altivec.vprtybd" => "__builtin_altivec_vprtybd", + "llvm.ppc.altivec.vprtybq" => "__builtin_altivec_vprtybq", + "llvm.ppc.altivec.vprtybw" => "__builtin_altivec_vprtybw", + "llvm.ppc.altivec.vrefp" => "__builtin_altivec_vrefp", + "llvm.ppc.altivec.vrfim" => "__builtin_altivec_vrfim", + "llvm.ppc.altivec.vrfin" => "__builtin_altivec_vrfin", + "llvm.ppc.altivec.vrfip" => "__builtin_altivec_vrfip", + "llvm.ppc.altivec.vrfiz" => "__builtin_altivec_vrfiz", + "llvm.ppc.altivec.vrlb" => "__builtin_altivec_vrlb", + "llvm.ppc.altivec.vrld" => "__builtin_altivec_vrld", + "llvm.ppc.altivec.vrldmi" => "__builtin_altivec_vrldmi", + "llvm.ppc.altivec.vrldnm" => "__builtin_altivec_vrldnm", + "llvm.ppc.altivec.vrlh" => "__builtin_altivec_vrlh", + "llvm.ppc.altivec.vrlqmi" => "__builtin_altivec_vrlqmi", + "llvm.ppc.altivec.vrlqnm" => "__builtin_altivec_vrlqnm", + "llvm.ppc.altivec.vrlw" => "__builtin_altivec_vrlw", + "llvm.ppc.altivec.vrlwmi" => "__builtin_altivec_vrlwmi", + "llvm.ppc.altivec.vrlwnm" => "__builtin_altivec_vrlwnm", + "llvm.ppc.altivec.vrsqrtefp" => "__builtin_altivec_vrsqrtefp", + "llvm.ppc.altivec.vsel" => "__builtin_altivec_vsel_4si", + "llvm.ppc.altivec.vsl" => "__builtin_altivec_vsl", + "llvm.ppc.altivec.vslb" => "__builtin_altivec_vslb", + "llvm.ppc.altivec.vsldbi" => "__builtin_altivec_vsldbi", + "llvm.ppc.altivec.vslh" => "__builtin_altivec_vslh", + "llvm.ppc.altivec.vslo" => "__builtin_altivec_vslo", + "llvm.ppc.altivec.vslv" => "__builtin_altivec_vslv", + "llvm.ppc.altivec.vslw" => "__builtin_altivec_vslw", + "llvm.ppc.altivec.vsr" => "__builtin_altivec_vsr", + "llvm.ppc.altivec.vsrab" => "__builtin_altivec_vsrab", + "llvm.ppc.altivec.vsrah" => "__builtin_altivec_vsrah", + "llvm.ppc.altivec.vsraw" => "__builtin_altivec_vsraw", + "llvm.ppc.altivec.vsrb" => "__builtin_altivec_vsrb", + "llvm.ppc.altivec.vsrdbi" => "__builtin_altivec_vsrdbi", + "llvm.ppc.altivec.vsrh" => "__builtin_altivec_vsrh", + "llvm.ppc.altivec.vsro" => "__builtin_altivec_vsro", + "llvm.ppc.altivec.vsrv" => "__builtin_altivec_vsrv", + "llvm.ppc.altivec.vsrw" => "__builtin_altivec_vsrw", + "llvm.ppc.altivec.vstribl" => 
"__builtin_altivec_vstribl", + "llvm.ppc.altivec.vstribl.p" => "__builtin_altivec_vstribl_p", + "llvm.ppc.altivec.vstribr" => "__builtin_altivec_vstribr", + "llvm.ppc.altivec.vstribr.p" => "__builtin_altivec_vstribr_p", + "llvm.ppc.altivec.vstrihl" => "__builtin_altivec_vstrihl", + "llvm.ppc.altivec.vstrihl.p" => "__builtin_altivec_vstrihl_p", + "llvm.ppc.altivec.vstrihr" => "__builtin_altivec_vstrihr", + "llvm.ppc.altivec.vstrihr.p" => "__builtin_altivec_vstrihr_p", + "llvm.ppc.altivec.vsubcuq" => "__builtin_altivec_vsubcuq", + "llvm.ppc.altivec.vsubcuw" => "__builtin_altivec_vsubcuw", + "llvm.ppc.altivec.vsubecuq" => "__builtin_altivec_vsubecuq", + "llvm.ppc.altivec.vsubeuqm" => "__builtin_altivec_vsubeuqm", + "llvm.ppc.altivec.vsubsbs" => "__builtin_altivec_vsubsbs", + "llvm.ppc.altivec.vsubshs" => "__builtin_altivec_vsubshs", + "llvm.ppc.altivec.vsubsws" => "__builtin_altivec_vsubsws", + "llvm.ppc.altivec.vsububs" => "__builtin_altivec_vsububs", + "llvm.ppc.altivec.vsubuhs" => "__builtin_altivec_vsubuhs", + "llvm.ppc.altivec.vsubuws" => "__builtin_altivec_vsubuws", + "llvm.ppc.altivec.vsum2sws" => "__builtin_altivec_vsum2sws", + "llvm.ppc.altivec.vsum4sbs" => "__builtin_altivec_vsum4sbs", + "llvm.ppc.altivec.vsum4shs" => "__builtin_altivec_vsum4shs", + "llvm.ppc.altivec.vsum4ubs" => "__builtin_altivec_vsum4ubs", + "llvm.ppc.altivec.vsumsws" => "__builtin_altivec_vsumsws", + "llvm.ppc.altivec.vupkhpx" => "__builtin_altivec_vupkhpx", + "llvm.ppc.altivec.vupkhsb" => "__builtin_altivec_vupkhsb", + "llvm.ppc.altivec.vupkhsh" => "__builtin_altivec_vupkhsh", + "llvm.ppc.altivec.vupkhsw" => "__builtin_altivec_vupkhsw", + "llvm.ppc.altivec.vupklpx" => "__builtin_altivec_vupklpx", + "llvm.ppc.altivec.vupklsb" => "__builtin_altivec_vupklsb", + "llvm.ppc.altivec.vupklsh" => "__builtin_altivec_vupklsh", + "llvm.ppc.altivec.vupklsw" => "__builtin_altivec_vupklsw", + "llvm.ppc.bcdadd" => "__builtin_ppc_bcdadd", + "llvm.ppc.bcdadd.p" => "__builtin_ppc_bcdadd_p", + "llvm.ppc.bcdsub" => "__builtin_ppc_bcdsub", + "llvm.ppc.bcdsub.p" => "__builtin_ppc_bcdsub_p", + "llvm.ppc.bpermd" => "__builtin_bpermd", + "llvm.ppc.cfuged" => "__builtin_cfuged", + "llvm.ppc.cmpeqb" => "__builtin_ppc_cmpeqb", + "llvm.ppc.cmprb" => "__builtin_ppc_cmprb", + "llvm.ppc.cntlzdm" => "__builtin_cntlzdm", + "llvm.ppc.cnttzdm" => "__builtin_cnttzdm", + "llvm.ppc.compare.exp.eq" => "__builtin_ppc_compare_exp_eq", + "llvm.ppc.compare.exp.gt" => "__builtin_ppc_compare_exp_gt", + "llvm.ppc.compare.exp.lt" => "__builtin_ppc_compare_exp_lt", + "llvm.ppc.compare.exp.uo" => "__builtin_ppc_compare_exp_uo", + "llvm.ppc.darn" => "__builtin_darn", + "llvm.ppc.darn32" => "__builtin_darn_32", + "llvm.ppc.darnraw" => "__builtin_darn_raw", + "llvm.ppc.dcbf" => "__builtin_dcbf", + "llvm.ppc.dcbfl" => "__builtin_ppc_dcbfl", + "llvm.ppc.dcbflp" => "__builtin_ppc_dcbflp", + "llvm.ppc.dcbst" => "__builtin_ppc_dcbst", + "llvm.ppc.dcbt" => "__builtin_ppc_dcbt", + "llvm.ppc.dcbtst" => "__builtin_ppc_dcbtst", + "llvm.ppc.dcbtstt" => "__builtin_ppc_dcbtstt", + "llvm.ppc.dcbtt" => "__builtin_ppc_dcbtt", + "llvm.ppc.dcbz" => "__builtin_ppc_dcbz", + "llvm.ppc.divde" => "__builtin_divde", + "llvm.ppc.divdeu" => "__builtin_divdeu", + "llvm.ppc.divf128.round.to.odd" => "__builtin_divf128_round_to_odd", + "llvm.ppc.divwe" => "__builtin_divwe", + "llvm.ppc.divweu" => "__builtin_divweu", + "llvm.ppc.eieio" => "__builtin_ppc_eieio", + "llvm.ppc.extract.exp" => "__builtin_ppc_extract_exp", + "llvm.ppc.extract.sig" => "__builtin_ppc_extract_sig", + "llvm.ppc.fcfid" => 
"__builtin_ppc_fcfid", + "llvm.ppc.fcfud" => "__builtin_ppc_fcfud", + "llvm.ppc.fctid" => "__builtin_ppc_fctid", + "llvm.ppc.fctidz" => "__builtin_ppc_fctidz", + "llvm.ppc.fctiw" => "__builtin_ppc_fctiw", + "llvm.ppc.fctiwz" => "__builtin_ppc_fctiwz", + "llvm.ppc.fctudz" => "__builtin_ppc_fctudz", + "llvm.ppc.fctuwz" => "__builtin_ppc_fctuwz", + "llvm.ppc.fmaf128.round.to.odd" => "__builtin_fmaf128_round_to_odd", + "llvm.ppc.fmsub" => "__builtin_ppc_fmsub", + "llvm.ppc.fmsubs" => "__builtin_ppc_fmsubs", + "llvm.ppc.fnabs" => "__builtin_ppc_fnabs", + "llvm.ppc.fnabss" => "__builtin_ppc_fnabss", + "llvm.ppc.fnmadd" => "__builtin_ppc_fnmadd", + "llvm.ppc.fnmadds" => "__builtin_ppc_fnmadds", + "llvm.ppc.fre" => "__builtin_ppc_fre", + "llvm.ppc.fres" => "__builtin_ppc_fres", + "llvm.ppc.frsqrte" => "__builtin_ppc_frsqrte", + "llvm.ppc.frsqrtes" => "__builtin_ppc_frsqrtes", + "llvm.ppc.fsel" => "__builtin_ppc_fsel", + "llvm.ppc.fsels" => "__builtin_ppc_fsels", + "llvm.ppc.get.texasr" => "__builtin_get_texasr", + "llvm.ppc.get.texasru" => "__builtin_get_texasru", + "llvm.ppc.get.tfhar" => "__builtin_get_tfhar", + "llvm.ppc.get.tfiar" => "__builtin_get_tfiar", + "llvm.ppc.icbt" => "__builtin_ppc_icbt", + "llvm.ppc.insert.exp" => "__builtin_ppc_insert_exp", + "llvm.ppc.iospace.eieio" => "__builtin_ppc_iospace_eieio", + "llvm.ppc.iospace.lwsync" => "__builtin_ppc_iospace_lwsync", + "llvm.ppc.iospace.sync" => "__builtin_ppc_iospace_sync", + "llvm.ppc.isync" => "__builtin_ppc_isync", + "llvm.ppc.load4r" => "__builtin_ppc_load4r", + "llvm.ppc.load8r" => "__builtin_ppc_load8r", + "llvm.ppc.lwsync" => "__builtin_ppc_lwsync", + "llvm.ppc.maddhd" => "__builtin_ppc_maddhd", + "llvm.ppc.maddhdu" => "__builtin_ppc_maddhdu", + "llvm.ppc.maddld" => "__builtin_ppc_maddld", + "llvm.ppc.mfmsr" => "__builtin_ppc_mfmsr", + "llvm.ppc.mftbu" => "__builtin_ppc_mftbu", + "llvm.ppc.mtfsb0" => "__builtin_ppc_mtfsb0", + "llvm.ppc.mtfsb1" => "__builtin_ppc_mtfsb1", + "llvm.ppc.mtfsfi" => "__builtin_ppc_mtfsfi", + "llvm.ppc.mtmsr" => "__builtin_ppc_mtmsr", + "llvm.ppc.mulf128.round.to.odd" => "__builtin_mulf128_round_to_odd", + "llvm.ppc.mulhd" => "__builtin_ppc_mulhd", + "llvm.ppc.mulhdu" => "__builtin_ppc_mulhdu", + "llvm.ppc.mulhw" => "__builtin_ppc_mulhw", + "llvm.ppc.mulhwu" => "__builtin_ppc_mulhwu", + "llvm.ppc.pack.longdouble" => "__builtin_pack_longdouble", + "llvm.ppc.pdepd" => "__builtin_pdepd", + "llvm.ppc.pextd" => "__builtin_pextd", + "llvm.ppc.qpx.qvfabs" => "__builtin_qpx_qvfabs", + "llvm.ppc.qpx.qvfadd" => "__builtin_qpx_qvfadd", + "llvm.ppc.qpx.qvfadds" => "__builtin_qpx_qvfadds", + "llvm.ppc.qpx.qvfcfid" => "__builtin_qpx_qvfcfid", + "llvm.ppc.qpx.qvfcfids" => "__builtin_qpx_qvfcfids", + "llvm.ppc.qpx.qvfcfidu" => "__builtin_qpx_qvfcfidu", + "llvm.ppc.qpx.qvfcfidus" => "__builtin_qpx_qvfcfidus", + "llvm.ppc.qpx.qvfcmpeq" => "__builtin_qpx_qvfcmpeq", + "llvm.ppc.qpx.qvfcmpgt" => "__builtin_qpx_qvfcmpgt", + "llvm.ppc.qpx.qvfcmplt" => "__builtin_qpx_qvfcmplt", + "llvm.ppc.qpx.qvfcpsgn" => "__builtin_qpx_qvfcpsgn", + "llvm.ppc.qpx.qvfctid" => "__builtin_qpx_qvfctid", + "llvm.ppc.qpx.qvfctidu" => "__builtin_qpx_qvfctidu", + "llvm.ppc.qpx.qvfctiduz" => "__builtin_qpx_qvfctiduz", + "llvm.ppc.qpx.qvfctidz" => "__builtin_qpx_qvfctidz", + "llvm.ppc.qpx.qvfctiw" => "__builtin_qpx_qvfctiw", + "llvm.ppc.qpx.qvfctiwu" => "__builtin_qpx_qvfctiwu", + "llvm.ppc.qpx.qvfctiwuz" => "__builtin_qpx_qvfctiwuz", + "llvm.ppc.qpx.qvfctiwz" => "__builtin_qpx_qvfctiwz", + "llvm.ppc.qpx.qvflogical" => "__builtin_qpx_qvflogical", + 
"llvm.ppc.qpx.qvfmadd" => "__builtin_qpx_qvfmadd", + "llvm.ppc.qpx.qvfmadds" => "__builtin_qpx_qvfmadds", + "llvm.ppc.qpx.qvfmsub" => "__builtin_qpx_qvfmsub", + "llvm.ppc.qpx.qvfmsubs" => "__builtin_qpx_qvfmsubs", + "llvm.ppc.qpx.qvfmul" => "__builtin_qpx_qvfmul", + "llvm.ppc.qpx.qvfmuls" => "__builtin_qpx_qvfmuls", + "llvm.ppc.qpx.qvfnabs" => "__builtin_qpx_qvfnabs", + "llvm.ppc.qpx.qvfneg" => "__builtin_qpx_qvfneg", + "llvm.ppc.qpx.qvfnmadd" => "__builtin_qpx_qvfnmadd", + "llvm.ppc.qpx.qvfnmadds" => "__builtin_qpx_qvfnmadds", + "llvm.ppc.qpx.qvfnmsub" => "__builtin_qpx_qvfnmsub", + "llvm.ppc.qpx.qvfnmsubs" => "__builtin_qpx_qvfnmsubs", + "llvm.ppc.qpx.qvfperm" => "__builtin_qpx_qvfperm", + "llvm.ppc.qpx.qvfre" => "__builtin_qpx_qvfre", + "llvm.ppc.qpx.qvfres" => "__builtin_qpx_qvfres", + "llvm.ppc.qpx.qvfrim" => "__builtin_qpx_qvfrim", + "llvm.ppc.qpx.qvfrin" => "__builtin_qpx_qvfrin", + "llvm.ppc.qpx.qvfrip" => "__builtin_qpx_qvfrip", + "llvm.ppc.qpx.qvfriz" => "__builtin_qpx_qvfriz", + "llvm.ppc.qpx.qvfrsp" => "__builtin_qpx_qvfrsp", + "llvm.ppc.qpx.qvfrsqrte" => "__builtin_qpx_qvfrsqrte", + "llvm.ppc.qpx.qvfrsqrtes" => "__builtin_qpx_qvfrsqrtes", + "llvm.ppc.qpx.qvfsel" => "__builtin_qpx_qvfsel", + "llvm.ppc.qpx.qvfsub" => "__builtin_qpx_qvfsub", + "llvm.ppc.qpx.qvfsubs" => "__builtin_qpx_qvfsubs", + "llvm.ppc.qpx.qvftstnan" => "__builtin_qpx_qvftstnan", + "llvm.ppc.qpx.qvfxmadd" => "__builtin_qpx_qvfxmadd", + "llvm.ppc.qpx.qvfxmadds" => "__builtin_qpx_qvfxmadds", + "llvm.ppc.qpx.qvfxmul" => "__builtin_qpx_qvfxmul", + "llvm.ppc.qpx.qvfxmuls" => "__builtin_qpx_qvfxmuls", + "llvm.ppc.qpx.qvfxxcpnmadd" => "__builtin_qpx_qvfxxcpnmadd", + "llvm.ppc.qpx.qvfxxcpnmadds" => "__builtin_qpx_qvfxxcpnmadds", + "llvm.ppc.qpx.qvfxxmadd" => "__builtin_qpx_qvfxxmadd", + "llvm.ppc.qpx.qvfxxmadds" => "__builtin_qpx_qvfxxmadds", + "llvm.ppc.qpx.qvfxxnpmadd" => "__builtin_qpx_qvfxxnpmadd", + "llvm.ppc.qpx.qvfxxnpmadds" => "__builtin_qpx_qvfxxnpmadds", + "llvm.ppc.qpx.qvgpci" => "__builtin_qpx_qvgpci", + "llvm.ppc.qpx.qvlfcd" => "__builtin_qpx_qvlfcd", + "llvm.ppc.qpx.qvlfcda" => "__builtin_qpx_qvlfcda", + "llvm.ppc.qpx.qvlfcs" => "__builtin_qpx_qvlfcs", + "llvm.ppc.qpx.qvlfcsa" => "__builtin_qpx_qvlfcsa", + "llvm.ppc.qpx.qvlfd" => "__builtin_qpx_qvlfd", + "llvm.ppc.qpx.qvlfda" => "__builtin_qpx_qvlfda", + "llvm.ppc.qpx.qvlfiwa" => "__builtin_qpx_qvlfiwa", + "llvm.ppc.qpx.qvlfiwaa" => "__builtin_qpx_qvlfiwaa", + "llvm.ppc.qpx.qvlfiwz" => "__builtin_qpx_qvlfiwz", + "llvm.ppc.qpx.qvlfiwza" => "__builtin_qpx_qvlfiwza", + "llvm.ppc.qpx.qvlfs" => "__builtin_qpx_qvlfs", + "llvm.ppc.qpx.qvlfsa" => "__builtin_qpx_qvlfsa", + "llvm.ppc.qpx.qvlpcld" => "__builtin_qpx_qvlpcld", + "llvm.ppc.qpx.qvlpcls" => "__builtin_qpx_qvlpcls", + "llvm.ppc.qpx.qvlpcrd" => "__builtin_qpx_qvlpcrd", + "llvm.ppc.qpx.qvlpcrs" => "__builtin_qpx_qvlpcrs", + "llvm.ppc.qpx.qvstfcd" => "__builtin_qpx_qvstfcd", + "llvm.ppc.qpx.qvstfcda" => "__builtin_qpx_qvstfcda", + "llvm.ppc.qpx.qvstfcs" => "__builtin_qpx_qvstfcs", + "llvm.ppc.qpx.qvstfcsa" => "__builtin_qpx_qvstfcsa", + "llvm.ppc.qpx.qvstfd" => "__builtin_qpx_qvstfd", + "llvm.ppc.qpx.qvstfda" => "__builtin_qpx_qvstfda", + "llvm.ppc.qpx.qvstfiw" => "__builtin_qpx_qvstfiw", + "llvm.ppc.qpx.qvstfiwa" => "__builtin_qpx_qvstfiwa", + "llvm.ppc.qpx.qvstfs" => "__builtin_qpx_qvstfs", + "llvm.ppc.qpx.qvstfsa" => "__builtin_qpx_qvstfsa", + "llvm.ppc.readflm" => "__builtin_readflm", + "llvm.ppc.scalar.extract.expq" => "__builtin_vsx_scalar_extract_expq", + "llvm.ppc.scalar.insert.exp.qp" => 
"__builtin_vsx_scalar_insert_exp_qp", + "llvm.ppc.set.texasr" => "__builtin_set_texasr", + "llvm.ppc.set.texasru" => "__builtin_set_texasru", + "llvm.ppc.set.tfhar" => "__builtin_set_tfhar", + "llvm.ppc.set.tfiar" => "__builtin_set_tfiar", + "llvm.ppc.setb" => "__builtin_ppc_setb", + "llvm.ppc.setflm" => "__builtin_setflm", + "llvm.ppc.setrnd" => "__builtin_setrnd", + "llvm.ppc.sqrtf128.round.to.odd" => "__builtin_sqrtf128_round_to_odd", + "llvm.ppc.stbcx" => "__builtin_ppc_stbcx", + "llvm.ppc.stdcx" => "__builtin_ppc_stdcx", + "llvm.ppc.stfiw" => "__builtin_ppc_stfiw", + "llvm.ppc.store2r" => "__builtin_ppc_store2r", + "llvm.ppc.store4r" => "__builtin_ppc_store4r", + "llvm.ppc.store8r" => "__builtin_ppc_store8r", + "llvm.ppc.stwcx" => "__builtin_ppc_stwcx", + "llvm.ppc.subf128.round.to.odd" => "__builtin_subf128_round_to_odd", + "llvm.ppc.sync" => "__builtin_ppc_sync", + "llvm.ppc.tabort" => "__builtin_tabort", + "llvm.ppc.tabortdc" => "__builtin_tabortdc", + "llvm.ppc.tabortdci" => "__builtin_tabortdci", + "llvm.ppc.tabortwc" => "__builtin_tabortwc", + "llvm.ppc.tabortwci" => "__builtin_tabortwci", + "llvm.ppc.tbegin" => "__builtin_tbegin", + "llvm.ppc.tcheck" => "__builtin_tcheck", + "llvm.ppc.tdw" => "__builtin_ppc_tdw", + "llvm.ppc.tend" => "__builtin_tend", + "llvm.ppc.tendall" => "__builtin_tendall", + "llvm.ppc.trap" => "__builtin_ppc_trap", + "llvm.ppc.trapd" => "__builtin_ppc_trapd", + "llvm.ppc.trechkpt" => "__builtin_trechkpt", + "llvm.ppc.treclaim" => "__builtin_treclaim", + "llvm.ppc.tresume" => "__builtin_tresume", + "llvm.ppc.truncf128.round.to.odd" => "__builtin_truncf128_round_to_odd", + "llvm.ppc.tsr" => "__builtin_tsr", + "llvm.ppc.tsuspend" => "__builtin_tsuspend", + "llvm.ppc.ttest" => "__builtin_ttest", + "llvm.ppc.tw" => "__builtin_ppc_tw", + "llvm.ppc.unpack.longdouble" => "__builtin_unpack_longdouble", + "llvm.ppc.vsx.xsmaxdp" => "__builtin_vsx_xsmaxdp", + "llvm.ppc.vsx.xsmindp" => "__builtin_vsx_xsmindp", + "llvm.ppc.vsx.xvcmpeqdp" => "__builtin_vsx_xvcmpeqdp", + "llvm.ppc.vsx.xvcmpeqdp.p" => "__builtin_vsx_xvcmpeqdp_p", + "llvm.ppc.vsx.xvcmpeqsp" => "__builtin_vsx_xvcmpeqsp", + "llvm.ppc.vsx.xvcmpeqsp.p" => "__builtin_vsx_xvcmpeqsp_p", + "llvm.ppc.vsx.xvcmpgedp" => "__builtin_vsx_xvcmpgedp", + "llvm.ppc.vsx.xvcmpgedp.p" => "__builtin_vsx_xvcmpgedp_p", + "llvm.ppc.vsx.xvcmpgesp" => "__builtin_vsx_xvcmpgesp", + "llvm.ppc.vsx.xvcmpgesp.p" => "__builtin_vsx_xvcmpgesp_p", + "llvm.ppc.vsx.xvcmpgtdp" => "__builtin_vsx_xvcmpgtdp", + "llvm.ppc.vsx.xvcmpgtdp.p" => "__builtin_vsx_xvcmpgtdp_p", + "llvm.ppc.vsx.xvcmpgtsp" => "__builtin_vsx_xvcmpgtsp", + "llvm.ppc.vsx.xvcmpgtsp.p" => "__builtin_vsx_xvcmpgtsp_p", + "llvm.ppc.vsx.xvcvbf16spn" => "__builtin_vsx_xvcvbf16spn", + "llvm.ppc.vsx.xvcvdpsp" => "__builtin_vsx_xvcvdpsp", + "llvm.ppc.vsx.xvcvdpsxws" => "__builtin_vsx_xvcvdpsxws", + "llvm.ppc.vsx.xvcvdpuxws" => "__builtin_vsx_xvcvdpuxws", + "llvm.ppc.vsx.xvcvhpsp" => "__builtin_vsx_xvcvhpsp", + "llvm.ppc.vsx.xvcvspbf16" => "__builtin_vsx_xvcvspbf16", + "llvm.ppc.vsx.xvcvspdp" => "__builtin_vsx_xvcvspdp", + "llvm.ppc.vsx.xvcvsphp" => "__builtin_vsx_xvcvsphp", + "llvm.ppc.vsx.xvcvspsxds" => "__builtin_vsx_xvcvspsxds", + "llvm.ppc.vsx.xvcvspuxds" => "__builtin_vsx_xvcvspuxds", + "llvm.ppc.vsx.xvcvsxdsp" => "__builtin_vsx_xvcvsxdsp", + "llvm.ppc.vsx.xvcvsxwdp" => "__builtin_vsx_xvcvsxwdp", + "llvm.ppc.vsx.xvcvuxdsp" => "__builtin_vsx_xvcvuxdsp", + "llvm.ppc.vsx.xvcvuxwdp" => "__builtin_vsx_xvcvuxwdp", + "llvm.ppc.vsx.xvdivdp" => "__builtin_vsx_xvdivdp", + 
"llvm.ppc.vsx.xvdivsp" => "__builtin_vsx_xvdivsp", + "llvm.ppc.vsx.xviexpdp" => "__builtin_vsx_xviexpdp", + "llvm.ppc.vsx.xviexpsp" => "__builtin_vsx_xviexpsp", + "llvm.ppc.vsx.xvmaxdp" => "__builtin_vsx_xvmaxdp", + "llvm.ppc.vsx.xvmaxsp" => "__builtin_vsx_xvmaxsp", + "llvm.ppc.vsx.xvmindp" => "__builtin_vsx_xvmindp", + "llvm.ppc.vsx.xvminsp" => "__builtin_vsx_xvminsp", + "llvm.ppc.vsx.xvredp" => "__builtin_vsx_xvredp", + "llvm.ppc.vsx.xvresp" => "__builtin_vsx_xvresp", + "llvm.ppc.vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp", + "llvm.ppc.vsx.xvrsqrtesp" => "__builtin_vsx_xvrsqrtesp", + "llvm.ppc.vsx.xvtdivdp" => "__builtin_vsx_xvtdivdp", + "llvm.ppc.vsx.xvtdivsp" => "__builtin_vsx_xvtdivsp", + "llvm.ppc.vsx.xvtlsbb" => "__builtin_vsx_xvtlsbb", + "llvm.ppc.vsx.xvtsqrtdp" => "__builtin_vsx_xvtsqrtdp", + "llvm.ppc.vsx.xvtsqrtsp" => "__builtin_vsx_xvtsqrtsp", + "llvm.ppc.vsx.xvtstdcdp" => "__builtin_vsx_xvtstdcdp", + "llvm.ppc.vsx.xvtstdcsp" => "__builtin_vsx_xvtstdcsp", + "llvm.ppc.vsx.xvxexpdp" => "__builtin_vsx_xvxexpdp", + "llvm.ppc.vsx.xvxexpsp" => "__builtin_vsx_xvxexpsp", + "llvm.ppc.vsx.xvxsigdp" => "__builtin_vsx_xvxsigdp", + "llvm.ppc.vsx.xvxsigsp" => "__builtin_vsx_xvxsigsp", + "llvm.ppc.vsx.xxblendvb" => "__builtin_vsx_xxblendvb", + "llvm.ppc.vsx.xxblendvd" => "__builtin_vsx_xxblendvd", + "llvm.ppc.vsx.xxblendvh" => "__builtin_vsx_xxblendvh", + "llvm.ppc.vsx.xxblendvw" => "__builtin_vsx_xxblendvw", + "llvm.ppc.vsx.xxeval" => "__builtin_vsx_xxeval", + "llvm.ppc.vsx.xxextractuw" => "__builtin_vsx_xxextractuw", + "llvm.ppc.vsx.xxgenpcvbm" => "__builtin_vsx_xxgenpcvbm", + "llvm.ppc.vsx.xxgenpcvdm" => "__builtin_vsx_xxgenpcvdm", + "llvm.ppc.vsx.xxgenpcvhm" => "__builtin_vsx_xxgenpcvhm", + "llvm.ppc.vsx.xxgenpcvwm" => "__builtin_vsx_xxgenpcvwm", + "llvm.ppc.vsx.xxinsertw" => "__builtin_vsx_xxinsertw", + "llvm.ppc.vsx.xxleqv" => "__builtin_vsx_xxleqv", + "llvm.ppc.vsx.xxpermx" => "__builtin_vsx_xxpermx", + // ptx + "llvm.ptx.bar.sync" => "__builtin_ptx_bar_sync", + "llvm.ptx.read.clock" => "__builtin_ptx_read_clock", + "llvm.ptx.read.clock64" => "__builtin_ptx_read_clock64", + "llvm.ptx.read.gridid" => "__builtin_ptx_read_gridid", + "llvm.ptx.read.laneid" => "__builtin_ptx_read_laneid", + "llvm.ptx.read.lanemask.eq" => "__builtin_ptx_read_lanemask_eq", + "llvm.ptx.read.lanemask.ge" => "__builtin_ptx_read_lanemask_ge", + "llvm.ptx.read.lanemask.gt" => "__builtin_ptx_read_lanemask_gt", + "llvm.ptx.read.lanemask.le" => "__builtin_ptx_read_lanemask_le", + "llvm.ptx.read.lanemask.lt" => "__builtin_ptx_read_lanemask_lt", + "llvm.ptx.read.nsmid" => "__builtin_ptx_read_nsmid", + "llvm.ptx.read.nwarpid" => "__builtin_ptx_read_nwarpid", + "llvm.ptx.read.pm0" => "__builtin_ptx_read_pm0", + "llvm.ptx.read.pm1" => "__builtin_ptx_read_pm1", + "llvm.ptx.read.pm2" => "__builtin_ptx_read_pm2", + "llvm.ptx.read.pm3" => "__builtin_ptx_read_pm3", + "llvm.ptx.read.smid" => "__builtin_ptx_read_smid", + "llvm.ptx.read.warpid" => "__builtin_ptx_read_warpid", + // r600 + "llvm.r600.group.barrier" => "__builtin_r600_group_barrier", + "llvm.r600.implicitarg.ptr" => "__builtin_r600_implicitarg_ptr", + "llvm.r600.rat.store.typed" => "__builtin_r600_rat_store_typed", + "llvm.r600.read.global.size.x" => "__builtin_r600_read_global_size_x", + "llvm.r600.read.global.size.y" => "__builtin_r600_read_global_size_y", + "llvm.r600.read.global.size.z" => "__builtin_r600_read_global_size_z", + "llvm.r600.read.ngroups.x" => "__builtin_r600_read_ngroups_x", + "llvm.r600.read.ngroups.y" => "__builtin_r600_read_ngroups_y", + 
"llvm.r600.read.ngroups.z" => "__builtin_r600_read_ngroups_z", + "llvm.r600.read.tgid.x" => "__builtin_r600_read_tgid_x", + "llvm.r600.read.tgid.y" => "__builtin_r600_read_tgid_y", + "llvm.r600.read.tgid.z" => "__builtin_r600_read_tgid_z", + // s390 + "llvm.s390.efpc" => "__builtin_s390_efpc", + "llvm.s390.etnd" => "__builtin_tx_nesting_depth", + "llvm.s390.lcbb" => "__builtin_s390_lcbb", + "llvm.s390.ppa.txassist" => "__builtin_tx_assist", + "llvm.s390.sfpc" => "__builtin_s390_sfpc", + "llvm.s390.tend" => "__builtin_tend", + "llvm.s390.vaccb" => "__builtin_s390_vaccb", + "llvm.s390.vacccq" => "__builtin_s390_vacccq", + "llvm.s390.vaccf" => "__builtin_s390_vaccf", + "llvm.s390.vaccg" => "__builtin_s390_vaccg", + "llvm.s390.vacch" => "__builtin_s390_vacch", + "llvm.s390.vaccq" => "__builtin_s390_vaccq", + "llvm.s390.vacq" => "__builtin_s390_vacq", + "llvm.s390.vaq" => "__builtin_s390_vaq", + "llvm.s390.vavgb" => "__builtin_s390_vavgb", + "llvm.s390.vavgf" => "__builtin_s390_vavgf", + "llvm.s390.vavgg" => "__builtin_s390_vavgg", + "llvm.s390.vavgh" => "__builtin_s390_vavgh", + "llvm.s390.vavglb" => "__builtin_s390_vavglb", + "llvm.s390.vavglf" => "__builtin_s390_vavglf", + "llvm.s390.vavglg" => "__builtin_s390_vavglg", + "llvm.s390.vavglh" => "__builtin_s390_vavglh", + "llvm.s390.vbperm" => "__builtin_s390_vbperm", + "llvm.s390.vcfn" => "__builtin_s390_vcfn", + "llvm.s390.vcksm" => "__builtin_s390_vcksm", + "llvm.s390.vclfnhs" => "__builtin_s390_vclfnhs", + "llvm.s390.vclfnls" => "__builtin_s390_vclfnls", + "llvm.s390.vcnf" => "__builtin_s390_vcnf", + "llvm.s390.vcrnfs" => "__builtin_s390_vcrnfs", + "llvm.s390.verimb" => "__builtin_s390_verimb", + "llvm.s390.verimf" => "__builtin_s390_verimf", + "llvm.s390.verimg" => "__builtin_s390_verimg", + "llvm.s390.verimh" => "__builtin_s390_verimh", + "llvm.s390.verllb" => "__builtin_s390_verllb", + "llvm.s390.verllf" => "__builtin_s390_verllf", + "llvm.s390.verllg" => "__builtin_s390_verllg", + "llvm.s390.verllh" => "__builtin_s390_verllh", + "llvm.s390.verllvb" => "__builtin_s390_verllvb", + "llvm.s390.verllvf" => "__builtin_s390_verllvf", + "llvm.s390.verllvg" => "__builtin_s390_verllvg", + "llvm.s390.verllvh" => "__builtin_s390_verllvh", + "llvm.s390.vfaeb" => "__builtin_s390_vfaeb", + "llvm.s390.vfaef" => "__builtin_s390_vfaef", + "llvm.s390.vfaeh" => "__builtin_s390_vfaeh", + "llvm.s390.vfaezb" => "__builtin_s390_vfaezb", + "llvm.s390.vfaezf" => "__builtin_s390_vfaezf", + "llvm.s390.vfaezh" => "__builtin_s390_vfaezh", + "llvm.s390.vfeeb" => "__builtin_s390_vfeeb", + "llvm.s390.vfeef" => "__builtin_s390_vfeef", + "llvm.s390.vfeeh" => "__builtin_s390_vfeeh", + "llvm.s390.vfeezb" => "__builtin_s390_vfeezb", + "llvm.s390.vfeezf" => "__builtin_s390_vfeezf", + "llvm.s390.vfeezh" => "__builtin_s390_vfeezh", + "llvm.s390.vfeneb" => "__builtin_s390_vfeneb", + "llvm.s390.vfenef" => "__builtin_s390_vfenef", + "llvm.s390.vfeneh" => "__builtin_s390_vfeneh", + "llvm.s390.vfenezb" => "__builtin_s390_vfenezb", + "llvm.s390.vfenezf" => "__builtin_s390_vfenezf", + "llvm.s390.vfenezh" => "__builtin_s390_vfenezh", + "llvm.s390.vgfmab" => "__builtin_s390_vgfmab", + "llvm.s390.vgfmaf" => "__builtin_s390_vgfmaf", + "llvm.s390.vgfmag" => "__builtin_s390_vgfmag", + "llvm.s390.vgfmah" => "__builtin_s390_vgfmah", + "llvm.s390.vgfmb" => "__builtin_s390_vgfmb", + "llvm.s390.vgfmf" => "__builtin_s390_vgfmf", + "llvm.s390.vgfmg" => "__builtin_s390_vgfmg", + "llvm.s390.vgfmh" => "__builtin_s390_vgfmh", + "llvm.s390.vistrb" => "__builtin_s390_vistrb", + "llvm.s390.vistrf" => 
"__builtin_s390_vistrf", + "llvm.s390.vistrh" => "__builtin_s390_vistrh", + "llvm.s390.vlbb" => "__builtin_s390_vlbb", + "llvm.s390.vll" => "__builtin_s390_vll", + "llvm.s390.vlrl" => "__builtin_s390_vlrl", + "llvm.s390.vmaeb" => "__builtin_s390_vmaeb", + "llvm.s390.vmaef" => "__builtin_s390_vmaef", + "llvm.s390.vmaeh" => "__builtin_s390_vmaeh", + "llvm.s390.vmahb" => "__builtin_s390_vmahb", + "llvm.s390.vmahf" => "__builtin_s390_vmahf", + "llvm.s390.vmahh" => "__builtin_s390_vmahh", + "llvm.s390.vmaleb" => "__builtin_s390_vmaleb", + "llvm.s390.vmalef" => "__builtin_s390_vmalef", + "llvm.s390.vmaleh" => "__builtin_s390_vmaleh", + "llvm.s390.vmalhb" => "__builtin_s390_vmalhb", + "llvm.s390.vmalhf" => "__builtin_s390_vmalhf", + "llvm.s390.vmalhh" => "__builtin_s390_vmalhh", + "llvm.s390.vmalob" => "__builtin_s390_vmalob", + "llvm.s390.vmalof" => "__builtin_s390_vmalof", + "llvm.s390.vmaloh" => "__builtin_s390_vmaloh", + "llvm.s390.vmaob" => "__builtin_s390_vmaob", + "llvm.s390.vmaof" => "__builtin_s390_vmaof", + "llvm.s390.vmaoh" => "__builtin_s390_vmaoh", + "llvm.s390.vmeb" => "__builtin_s390_vmeb", + "llvm.s390.vmef" => "__builtin_s390_vmef", + "llvm.s390.vmeh" => "__builtin_s390_vmeh", + "llvm.s390.vmhb" => "__builtin_s390_vmhb", + "llvm.s390.vmhf" => "__builtin_s390_vmhf", + "llvm.s390.vmhh" => "__builtin_s390_vmhh", + "llvm.s390.vmleb" => "__builtin_s390_vmleb", + "llvm.s390.vmlef" => "__builtin_s390_vmlef", + "llvm.s390.vmleh" => "__builtin_s390_vmleh", + "llvm.s390.vmlhb" => "__builtin_s390_vmlhb", + "llvm.s390.vmlhf" => "__builtin_s390_vmlhf", + "llvm.s390.vmlhh" => "__builtin_s390_vmlhh", + "llvm.s390.vmlob" => "__builtin_s390_vmlob", + "llvm.s390.vmlof" => "__builtin_s390_vmlof", + "llvm.s390.vmloh" => "__builtin_s390_vmloh", + "llvm.s390.vmob" => "__builtin_s390_vmob", + "llvm.s390.vmof" => "__builtin_s390_vmof", + "llvm.s390.vmoh" => "__builtin_s390_vmoh", + "llvm.s390.vmslg" => "__builtin_s390_vmslg", + "llvm.s390.vpdi" => "__builtin_s390_vpdi", + "llvm.s390.vperm" => "__builtin_s390_vperm", + "llvm.s390.vpklsf" => "__builtin_s390_vpklsf", + "llvm.s390.vpklsg" => "__builtin_s390_vpklsg", + "llvm.s390.vpklsh" => "__builtin_s390_vpklsh", + "llvm.s390.vpksf" => "__builtin_s390_vpksf", + "llvm.s390.vpksg" => "__builtin_s390_vpksg", + "llvm.s390.vpksh" => "__builtin_s390_vpksh", + "llvm.s390.vsbcbiq" => "__builtin_s390_vsbcbiq", + "llvm.s390.vsbiq" => "__builtin_s390_vsbiq", + "llvm.s390.vscbib" => "__builtin_s390_vscbib", + "llvm.s390.vscbif" => "__builtin_s390_vscbif", + "llvm.s390.vscbig" => "__builtin_s390_vscbig", + "llvm.s390.vscbih" => "__builtin_s390_vscbih", + "llvm.s390.vscbiq" => "__builtin_s390_vscbiq", + "llvm.s390.vsl" => "__builtin_s390_vsl", + "llvm.s390.vslb" => "__builtin_s390_vslb", + "llvm.s390.vsld" => "__builtin_s390_vsld", + "llvm.s390.vsldb" => "__builtin_s390_vsldb", + "llvm.s390.vsq" => "__builtin_s390_vsq", + "llvm.s390.vsra" => "__builtin_s390_vsra", + "llvm.s390.vsrab" => "__builtin_s390_vsrab", + "llvm.s390.vsrd" => "__builtin_s390_vsrd", + "llvm.s390.vsrl" => "__builtin_s390_vsrl", + "llvm.s390.vsrlb" => "__builtin_s390_vsrlb", + "llvm.s390.vstl" => "__builtin_s390_vstl", + "llvm.s390.vstrcb" => "__builtin_s390_vstrcb", + "llvm.s390.vstrcf" => "__builtin_s390_vstrcf", + "llvm.s390.vstrch" => "__builtin_s390_vstrch", + "llvm.s390.vstrczb" => "__builtin_s390_vstrczb", + "llvm.s390.vstrczf" => "__builtin_s390_vstrczf", + "llvm.s390.vstrczh" => "__builtin_s390_vstrczh", + "llvm.s390.vstrl" => "__builtin_s390_vstrl", + "llvm.s390.vsumb" => 
"__builtin_s390_vsumb", + "llvm.s390.vsumgf" => "__builtin_s390_vsumgf", + "llvm.s390.vsumgh" => "__builtin_s390_vsumgh", + "llvm.s390.vsumh" => "__builtin_s390_vsumh", + "llvm.s390.vsumqf" => "__builtin_s390_vsumqf", + "llvm.s390.vsumqg" => "__builtin_s390_vsumqg", + "llvm.s390.vtm" => "__builtin_s390_vtm", + "llvm.s390.vuphb" => "__builtin_s390_vuphb", + "llvm.s390.vuphf" => "__builtin_s390_vuphf", + "llvm.s390.vuphh" => "__builtin_s390_vuphh", + "llvm.s390.vuplb" => "__builtin_s390_vuplb", + "llvm.s390.vuplf" => "__builtin_s390_vuplf", + "llvm.s390.vuplhb" => "__builtin_s390_vuplhb", + "llvm.s390.vuplhf" => "__builtin_s390_vuplhf", + "llvm.s390.vuplhh" => "__builtin_s390_vuplhh", + "llvm.s390.vuplhw" => "__builtin_s390_vuplhw", + "llvm.s390.vupllb" => "__builtin_s390_vupllb", + "llvm.s390.vupllf" => "__builtin_s390_vupllf", + "llvm.s390.vupllh" => "__builtin_s390_vupllh", + // ve + "llvm.ve.vl.andm.MMM" => "__builtin_ve_vl_andm_MMM", + "llvm.ve.vl.andm.mmm" => "__builtin_ve_vl_andm_mmm", + "llvm.ve.vl.eqvm.MMM" => "__builtin_ve_vl_eqvm_MMM", + "llvm.ve.vl.eqvm.mmm" => "__builtin_ve_vl_eqvm_mmm", + "llvm.ve.vl.extract.vm512l" => "__builtin_ve_vl_extract_vm512l", + "llvm.ve.vl.extract.vm512u" => "__builtin_ve_vl_extract_vm512u", + "llvm.ve.vl.fencec.s" => "__builtin_ve_vl_fencec_s", + "llvm.ve.vl.fencei" => "__builtin_ve_vl_fencei", + "llvm.ve.vl.fencem.s" => "__builtin_ve_vl_fencem_s", + "llvm.ve.vl.fidcr.sss" => "__builtin_ve_vl_fidcr_sss", + "llvm.ve.vl.insert.vm512l" => "__builtin_ve_vl_insert_vm512l", + "llvm.ve.vl.insert.vm512u" => "__builtin_ve_vl_insert_vm512u", + "llvm.ve.vl.lcr.sss" => "__builtin_ve_vl_lcr_sss", + "llvm.ve.vl.lsv.vvss" => "__builtin_ve_vl_lsv_vvss", + "llvm.ve.vl.lvm.MMss" => "__builtin_ve_vl_lvm_MMss", + "llvm.ve.vl.lvm.mmss" => "__builtin_ve_vl_lvm_mmss", + "llvm.ve.vl.lvsd.svs" => "__builtin_ve_vl_lvsd_svs", + "llvm.ve.vl.lvsl.svs" => "__builtin_ve_vl_lvsl_svs", + "llvm.ve.vl.lvss.svs" => "__builtin_ve_vl_lvss_svs", + "llvm.ve.vl.lzvm.sml" => "__builtin_ve_vl_lzvm_sml", + "llvm.ve.vl.negm.MM" => "__builtin_ve_vl_negm_MM", + "llvm.ve.vl.negm.mm" => "__builtin_ve_vl_negm_mm", + "llvm.ve.vl.nndm.MMM" => "__builtin_ve_vl_nndm_MMM", + "llvm.ve.vl.nndm.mmm" => "__builtin_ve_vl_nndm_mmm", + "llvm.ve.vl.orm.MMM" => "__builtin_ve_vl_orm_MMM", + "llvm.ve.vl.orm.mmm" => "__builtin_ve_vl_orm_mmm", + "llvm.ve.vl.pack.f32a" => "__builtin_ve_vl_pack_f32a", + "llvm.ve.vl.pack.f32p" => "__builtin_ve_vl_pack_f32p", + "llvm.ve.vl.pcvm.sml" => "__builtin_ve_vl_pcvm_sml", + "llvm.ve.vl.pfchv.ssl" => "__builtin_ve_vl_pfchv_ssl", + "llvm.ve.vl.pfchvnc.ssl" => "__builtin_ve_vl_pfchvnc_ssl", + "llvm.ve.vl.pvadds.vsvMvl" => "__builtin_ve_vl_pvadds_vsvMvl", + "llvm.ve.vl.pvadds.vsvl" => "__builtin_ve_vl_pvadds_vsvl", + "llvm.ve.vl.pvadds.vsvvl" => "__builtin_ve_vl_pvadds_vsvvl", + "llvm.ve.vl.pvadds.vvvMvl" => "__builtin_ve_vl_pvadds_vvvMvl", + "llvm.ve.vl.pvadds.vvvl" => "__builtin_ve_vl_pvadds_vvvl", + "llvm.ve.vl.pvadds.vvvvl" => "__builtin_ve_vl_pvadds_vvvvl", + "llvm.ve.vl.pvaddu.vsvMvl" => "__builtin_ve_vl_pvaddu_vsvMvl", + "llvm.ve.vl.pvaddu.vsvl" => "__builtin_ve_vl_pvaddu_vsvl", + "llvm.ve.vl.pvaddu.vsvvl" => "__builtin_ve_vl_pvaddu_vsvvl", + "llvm.ve.vl.pvaddu.vvvMvl" => "__builtin_ve_vl_pvaddu_vvvMvl", + "llvm.ve.vl.pvaddu.vvvl" => "__builtin_ve_vl_pvaddu_vvvl", + "llvm.ve.vl.pvaddu.vvvvl" => "__builtin_ve_vl_pvaddu_vvvvl", + "llvm.ve.vl.pvand.vsvMvl" => "__builtin_ve_vl_pvand_vsvMvl", + "llvm.ve.vl.pvand.vsvl" => "__builtin_ve_vl_pvand_vsvl", + "llvm.ve.vl.pvand.vsvvl" => 
"__builtin_ve_vl_pvand_vsvvl", + "llvm.ve.vl.pvand.vvvMvl" => "__builtin_ve_vl_pvand_vvvMvl", + "llvm.ve.vl.pvand.vvvl" => "__builtin_ve_vl_pvand_vvvl", + "llvm.ve.vl.pvand.vvvvl" => "__builtin_ve_vl_pvand_vvvvl", + "llvm.ve.vl.pvbrd.vsMvl" => "__builtin_ve_vl_pvbrd_vsMvl", + "llvm.ve.vl.pvbrd.vsl" => "__builtin_ve_vl_pvbrd_vsl", + "llvm.ve.vl.pvbrd.vsvl" => "__builtin_ve_vl_pvbrd_vsvl", + "llvm.ve.vl.pvbrv.vvMvl" => "__builtin_ve_vl_pvbrv_vvMvl", + "llvm.ve.vl.pvbrv.vvl" => "__builtin_ve_vl_pvbrv_vvl", + "llvm.ve.vl.pvbrv.vvvl" => "__builtin_ve_vl_pvbrv_vvvl", + "llvm.ve.vl.pvbrvlo.vvl" => "__builtin_ve_vl_pvbrvlo_vvl", + "llvm.ve.vl.pvbrvlo.vvmvl" => "__builtin_ve_vl_pvbrvlo_vvmvl", + "llvm.ve.vl.pvbrvlo.vvvl" => "__builtin_ve_vl_pvbrvlo_vvvl", + "llvm.ve.vl.pvbrvup.vvl" => "__builtin_ve_vl_pvbrvup_vvl", + "llvm.ve.vl.pvbrvup.vvmvl" => "__builtin_ve_vl_pvbrvup_vvmvl", + "llvm.ve.vl.pvbrvup.vvvl" => "__builtin_ve_vl_pvbrvup_vvvl", + "llvm.ve.vl.pvcmps.vsvMvl" => "__builtin_ve_vl_pvcmps_vsvMvl", + "llvm.ve.vl.pvcmps.vsvl" => "__builtin_ve_vl_pvcmps_vsvl", + "llvm.ve.vl.pvcmps.vsvvl" => "__builtin_ve_vl_pvcmps_vsvvl", + "llvm.ve.vl.pvcmps.vvvMvl" => "__builtin_ve_vl_pvcmps_vvvMvl", + "llvm.ve.vl.pvcmps.vvvl" => "__builtin_ve_vl_pvcmps_vvvl", + "llvm.ve.vl.pvcmps.vvvvl" => "__builtin_ve_vl_pvcmps_vvvvl", + "llvm.ve.vl.pvcmpu.vsvMvl" => "__builtin_ve_vl_pvcmpu_vsvMvl", + "llvm.ve.vl.pvcmpu.vsvl" => "__builtin_ve_vl_pvcmpu_vsvl", + "llvm.ve.vl.pvcmpu.vsvvl" => "__builtin_ve_vl_pvcmpu_vsvvl", + "llvm.ve.vl.pvcmpu.vvvMvl" => "__builtin_ve_vl_pvcmpu_vvvMvl", + "llvm.ve.vl.pvcmpu.vvvl" => "__builtin_ve_vl_pvcmpu_vvvl", + "llvm.ve.vl.pvcmpu.vvvvl" => "__builtin_ve_vl_pvcmpu_vvvvl", + "llvm.ve.vl.pvcvtsw.vvl" => "__builtin_ve_vl_pvcvtsw_vvl", + "llvm.ve.vl.pvcvtsw.vvvl" => "__builtin_ve_vl_pvcvtsw_vvvl", + "llvm.ve.vl.pvcvtws.vvMvl" => "__builtin_ve_vl_pvcvtws_vvMvl", + "llvm.ve.vl.pvcvtws.vvl" => "__builtin_ve_vl_pvcvtws_vvl", + "llvm.ve.vl.pvcvtws.vvvl" => "__builtin_ve_vl_pvcvtws_vvvl", + "llvm.ve.vl.pvcvtwsrz.vvMvl" => "__builtin_ve_vl_pvcvtwsrz_vvMvl", + "llvm.ve.vl.pvcvtwsrz.vvl" => "__builtin_ve_vl_pvcvtwsrz_vvl", + "llvm.ve.vl.pvcvtwsrz.vvvl" => "__builtin_ve_vl_pvcvtwsrz_vvvl", + "llvm.ve.vl.pveqv.vsvMvl" => "__builtin_ve_vl_pveqv_vsvMvl", + "llvm.ve.vl.pveqv.vsvl" => "__builtin_ve_vl_pveqv_vsvl", + "llvm.ve.vl.pveqv.vsvvl" => "__builtin_ve_vl_pveqv_vsvvl", + "llvm.ve.vl.pveqv.vvvMvl" => "__builtin_ve_vl_pveqv_vvvMvl", + "llvm.ve.vl.pveqv.vvvl" => "__builtin_ve_vl_pveqv_vvvl", + "llvm.ve.vl.pveqv.vvvvl" => "__builtin_ve_vl_pveqv_vvvvl", + "llvm.ve.vl.pvfadd.vsvMvl" => "__builtin_ve_vl_pvfadd_vsvMvl", + "llvm.ve.vl.pvfadd.vsvl" => "__builtin_ve_vl_pvfadd_vsvl", + "llvm.ve.vl.pvfadd.vsvvl" => "__builtin_ve_vl_pvfadd_vsvvl", + "llvm.ve.vl.pvfadd.vvvMvl" => "__builtin_ve_vl_pvfadd_vvvMvl", + "llvm.ve.vl.pvfadd.vvvl" => "__builtin_ve_vl_pvfadd_vvvl", + "llvm.ve.vl.pvfadd.vvvvl" => "__builtin_ve_vl_pvfadd_vvvvl", + "llvm.ve.vl.pvfcmp.vsvMvl" => "__builtin_ve_vl_pvfcmp_vsvMvl", + "llvm.ve.vl.pvfcmp.vsvl" => "__builtin_ve_vl_pvfcmp_vsvl", + "llvm.ve.vl.pvfcmp.vsvvl" => "__builtin_ve_vl_pvfcmp_vsvvl", + "llvm.ve.vl.pvfcmp.vvvMvl" => "__builtin_ve_vl_pvfcmp_vvvMvl", + "llvm.ve.vl.pvfcmp.vvvl" => "__builtin_ve_vl_pvfcmp_vvvl", + "llvm.ve.vl.pvfcmp.vvvvl" => "__builtin_ve_vl_pvfcmp_vvvvl", + "llvm.ve.vl.pvfmad.vsvvMvl" => "__builtin_ve_vl_pvfmad_vsvvMvl", + "llvm.ve.vl.pvfmad.vsvvl" => "__builtin_ve_vl_pvfmad_vsvvl", + "llvm.ve.vl.pvfmad.vsvvvl" => "__builtin_ve_vl_pvfmad_vsvvvl", + 
"llvm.ve.vl.pvfmad.vvsvMvl" => "__builtin_ve_vl_pvfmad_vvsvMvl", + "llvm.ve.vl.pvfmad.vvsvl" => "__builtin_ve_vl_pvfmad_vvsvl", + "llvm.ve.vl.pvfmad.vvsvvl" => "__builtin_ve_vl_pvfmad_vvsvvl", + "llvm.ve.vl.pvfmad.vvvvMvl" => "__builtin_ve_vl_pvfmad_vvvvMvl", + "llvm.ve.vl.pvfmad.vvvvl" => "__builtin_ve_vl_pvfmad_vvvvl", + "llvm.ve.vl.pvfmad.vvvvvl" => "__builtin_ve_vl_pvfmad_vvvvvl", + "llvm.ve.vl.pvfmax.vsvMvl" => "__builtin_ve_vl_pvfmax_vsvMvl", + "llvm.ve.vl.pvfmax.vsvl" => "__builtin_ve_vl_pvfmax_vsvl", + "llvm.ve.vl.pvfmax.vsvvl" => "__builtin_ve_vl_pvfmax_vsvvl", + "llvm.ve.vl.pvfmax.vvvMvl" => "__builtin_ve_vl_pvfmax_vvvMvl", + "llvm.ve.vl.pvfmax.vvvl" => "__builtin_ve_vl_pvfmax_vvvl", + "llvm.ve.vl.pvfmax.vvvvl" => "__builtin_ve_vl_pvfmax_vvvvl", + "llvm.ve.vl.pvfmin.vsvMvl" => "__builtin_ve_vl_pvfmin_vsvMvl", + "llvm.ve.vl.pvfmin.vsvl" => "__builtin_ve_vl_pvfmin_vsvl", + "llvm.ve.vl.pvfmin.vsvvl" => "__builtin_ve_vl_pvfmin_vsvvl", + "llvm.ve.vl.pvfmin.vvvMvl" => "__builtin_ve_vl_pvfmin_vvvMvl", + "llvm.ve.vl.pvfmin.vvvl" => "__builtin_ve_vl_pvfmin_vvvl", + "llvm.ve.vl.pvfmin.vvvvl" => "__builtin_ve_vl_pvfmin_vvvvl", + "llvm.ve.vl.pvfmkaf.Ml" => "__builtin_ve_vl_pvfmkaf_Ml", + "llvm.ve.vl.pvfmkat.Ml" => "__builtin_ve_vl_pvfmkat_Ml", + "llvm.ve.vl.pvfmkseq.MvMl" => "__builtin_ve_vl_pvfmkseq_MvMl", + "llvm.ve.vl.pvfmkseq.Mvl" => "__builtin_ve_vl_pvfmkseq_Mvl", + "llvm.ve.vl.pvfmkseqnan.MvMl" => "__builtin_ve_vl_pvfmkseqnan_MvMl", + "llvm.ve.vl.pvfmkseqnan.Mvl" => "__builtin_ve_vl_pvfmkseqnan_Mvl", + "llvm.ve.vl.pvfmksge.MvMl" => "__builtin_ve_vl_pvfmksge_MvMl", + "llvm.ve.vl.pvfmksge.Mvl" => "__builtin_ve_vl_pvfmksge_Mvl", + "llvm.ve.vl.pvfmksgenan.MvMl" => "__builtin_ve_vl_pvfmksgenan_MvMl", + "llvm.ve.vl.pvfmksgenan.Mvl" => "__builtin_ve_vl_pvfmksgenan_Mvl", + "llvm.ve.vl.pvfmksgt.MvMl" => "__builtin_ve_vl_pvfmksgt_MvMl", + "llvm.ve.vl.pvfmksgt.Mvl" => "__builtin_ve_vl_pvfmksgt_Mvl", + "llvm.ve.vl.pvfmksgtnan.MvMl" => "__builtin_ve_vl_pvfmksgtnan_MvMl", + "llvm.ve.vl.pvfmksgtnan.Mvl" => "__builtin_ve_vl_pvfmksgtnan_Mvl", + "llvm.ve.vl.pvfmksle.MvMl" => "__builtin_ve_vl_pvfmksle_MvMl", + "llvm.ve.vl.pvfmksle.Mvl" => "__builtin_ve_vl_pvfmksle_Mvl", + "llvm.ve.vl.pvfmkslenan.MvMl" => "__builtin_ve_vl_pvfmkslenan_MvMl", + "llvm.ve.vl.pvfmkslenan.Mvl" => "__builtin_ve_vl_pvfmkslenan_Mvl", + "llvm.ve.vl.pvfmksloeq.mvl" => "__builtin_ve_vl_pvfmksloeq_mvl", + "llvm.ve.vl.pvfmksloeq.mvml" => "__builtin_ve_vl_pvfmksloeq_mvml", + "llvm.ve.vl.pvfmksloeqnan.mvl" => "__builtin_ve_vl_pvfmksloeqnan_mvl", + "llvm.ve.vl.pvfmksloeqnan.mvml" => "__builtin_ve_vl_pvfmksloeqnan_mvml", + "llvm.ve.vl.pvfmksloge.mvl" => "__builtin_ve_vl_pvfmksloge_mvl", + "llvm.ve.vl.pvfmksloge.mvml" => "__builtin_ve_vl_pvfmksloge_mvml", + "llvm.ve.vl.pvfmkslogenan.mvl" => "__builtin_ve_vl_pvfmkslogenan_mvl", + "llvm.ve.vl.pvfmkslogenan.mvml" => "__builtin_ve_vl_pvfmkslogenan_mvml", + "llvm.ve.vl.pvfmkslogt.mvl" => "__builtin_ve_vl_pvfmkslogt_mvl", + "llvm.ve.vl.pvfmkslogt.mvml" => "__builtin_ve_vl_pvfmkslogt_mvml", + "llvm.ve.vl.pvfmkslogtnan.mvl" => "__builtin_ve_vl_pvfmkslogtnan_mvl", + "llvm.ve.vl.pvfmkslogtnan.mvml" => "__builtin_ve_vl_pvfmkslogtnan_mvml", + "llvm.ve.vl.pvfmkslole.mvl" => "__builtin_ve_vl_pvfmkslole_mvl", + "llvm.ve.vl.pvfmkslole.mvml" => "__builtin_ve_vl_pvfmkslole_mvml", + "llvm.ve.vl.pvfmkslolenan.mvl" => "__builtin_ve_vl_pvfmkslolenan_mvl", + "llvm.ve.vl.pvfmkslolenan.mvml" => "__builtin_ve_vl_pvfmkslolenan_mvml", + "llvm.ve.vl.pvfmkslolt.mvl" => "__builtin_ve_vl_pvfmkslolt_mvl", + 
"llvm.ve.vl.pvfmkslolt.mvml" => "__builtin_ve_vl_pvfmkslolt_mvml", + "llvm.ve.vl.pvfmksloltnan.mvl" => "__builtin_ve_vl_pvfmksloltnan_mvl", + "llvm.ve.vl.pvfmksloltnan.mvml" => "__builtin_ve_vl_pvfmksloltnan_mvml", + "llvm.ve.vl.pvfmkslonan.mvl" => "__builtin_ve_vl_pvfmkslonan_mvl", + "llvm.ve.vl.pvfmkslonan.mvml" => "__builtin_ve_vl_pvfmkslonan_mvml", + "llvm.ve.vl.pvfmkslone.mvl" => "__builtin_ve_vl_pvfmkslone_mvl", + "llvm.ve.vl.pvfmkslone.mvml" => "__builtin_ve_vl_pvfmkslone_mvml", + "llvm.ve.vl.pvfmkslonenan.mvl" => "__builtin_ve_vl_pvfmkslonenan_mvl", + "llvm.ve.vl.pvfmkslonenan.mvml" => "__builtin_ve_vl_pvfmkslonenan_mvml", + "llvm.ve.vl.pvfmkslonum.mvl" => "__builtin_ve_vl_pvfmkslonum_mvl", + "llvm.ve.vl.pvfmkslonum.mvml" => "__builtin_ve_vl_pvfmkslonum_mvml", + "llvm.ve.vl.pvfmkslt.MvMl" => "__builtin_ve_vl_pvfmkslt_MvMl", + "llvm.ve.vl.pvfmkslt.Mvl" => "__builtin_ve_vl_pvfmkslt_Mvl", + "llvm.ve.vl.pvfmksltnan.MvMl" => "__builtin_ve_vl_pvfmksltnan_MvMl", + "llvm.ve.vl.pvfmksltnan.Mvl" => "__builtin_ve_vl_pvfmksltnan_Mvl", + "llvm.ve.vl.pvfmksnan.MvMl" => "__builtin_ve_vl_pvfmksnan_MvMl", + "llvm.ve.vl.pvfmksnan.Mvl" => "__builtin_ve_vl_pvfmksnan_Mvl", + "llvm.ve.vl.pvfmksne.MvMl" => "__builtin_ve_vl_pvfmksne_MvMl", + "llvm.ve.vl.pvfmksne.Mvl" => "__builtin_ve_vl_pvfmksne_Mvl", + "llvm.ve.vl.pvfmksnenan.MvMl" => "__builtin_ve_vl_pvfmksnenan_MvMl", + "llvm.ve.vl.pvfmksnenan.Mvl" => "__builtin_ve_vl_pvfmksnenan_Mvl", + "llvm.ve.vl.pvfmksnum.MvMl" => "__builtin_ve_vl_pvfmksnum_MvMl", + "llvm.ve.vl.pvfmksnum.Mvl" => "__builtin_ve_vl_pvfmksnum_Mvl", + "llvm.ve.vl.pvfmksupeq.mvl" => "__builtin_ve_vl_pvfmksupeq_mvl", + "llvm.ve.vl.pvfmksupeq.mvml" => "__builtin_ve_vl_pvfmksupeq_mvml", + "llvm.ve.vl.pvfmksupeqnan.mvl" => "__builtin_ve_vl_pvfmksupeqnan_mvl", + "llvm.ve.vl.pvfmksupeqnan.mvml" => "__builtin_ve_vl_pvfmksupeqnan_mvml", + "llvm.ve.vl.pvfmksupge.mvl" => "__builtin_ve_vl_pvfmksupge_mvl", + "llvm.ve.vl.pvfmksupge.mvml" => "__builtin_ve_vl_pvfmksupge_mvml", + "llvm.ve.vl.pvfmksupgenan.mvl" => "__builtin_ve_vl_pvfmksupgenan_mvl", + "llvm.ve.vl.pvfmksupgenan.mvml" => "__builtin_ve_vl_pvfmksupgenan_mvml", + "llvm.ve.vl.pvfmksupgt.mvl" => "__builtin_ve_vl_pvfmksupgt_mvl", + "llvm.ve.vl.pvfmksupgt.mvml" => "__builtin_ve_vl_pvfmksupgt_mvml", + "llvm.ve.vl.pvfmksupgtnan.mvl" => "__builtin_ve_vl_pvfmksupgtnan_mvl", + "llvm.ve.vl.pvfmksupgtnan.mvml" => "__builtin_ve_vl_pvfmksupgtnan_mvml", + "llvm.ve.vl.pvfmksuple.mvl" => "__builtin_ve_vl_pvfmksuple_mvl", + "llvm.ve.vl.pvfmksuple.mvml" => "__builtin_ve_vl_pvfmksuple_mvml", + "llvm.ve.vl.pvfmksuplenan.mvl" => "__builtin_ve_vl_pvfmksuplenan_mvl", + "llvm.ve.vl.pvfmksuplenan.mvml" => "__builtin_ve_vl_pvfmksuplenan_mvml", + "llvm.ve.vl.pvfmksuplt.mvl" => "__builtin_ve_vl_pvfmksuplt_mvl", + "llvm.ve.vl.pvfmksuplt.mvml" => "__builtin_ve_vl_pvfmksuplt_mvml", + "llvm.ve.vl.pvfmksupltnan.mvl" => "__builtin_ve_vl_pvfmksupltnan_mvl", + "llvm.ve.vl.pvfmksupltnan.mvml" => "__builtin_ve_vl_pvfmksupltnan_mvml", + "llvm.ve.vl.pvfmksupnan.mvl" => "__builtin_ve_vl_pvfmksupnan_mvl", + "llvm.ve.vl.pvfmksupnan.mvml" => "__builtin_ve_vl_pvfmksupnan_mvml", + "llvm.ve.vl.pvfmksupne.mvl" => "__builtin_ve_vl_pvfmksupne_mvl", + "llvm.ve.vl.pvfmksupne.mvml" => "__builtin_ve_vl_pvfmksupne_mvml", + "llvm.ve.vl.pvfmksupnenan.mvl" => "__builtin_ve_vl_pvfmksupnenan_mvl", + "llvm.ve.vl.pvfmksupnenan.mvml" => "__builtin_ve_vl_pvfmksupnenan_mvml", + "llvm.ve.vl.pvfmksupnum.mvl" => "__builtin_ve_vl_pvfmksupnum_mvl", + "llvm.ve.vl.pvfmksupnum.mvml" => 
"__builtin_ve_vl_pvfmksupnum_mvml", + "llvm.ve.vl.pvfmkweq.MvMl" => "__builtin_ve_vl_pvfmkweq_MvMl", + "llvm.ve.vl.pvfmkweq.Mvl" => "__builtin_ve_vl_pvfmkweq_Mvl", + "llvm.ve.vl.pvfmkweqnan.MvMl" => "__builtin_ve_vl_pvfmkweqnan_MvMl", + "llvm.ve.vl.pvfmkweqnan.Mvl" => "__builtin_ve_vl_pvfmkweqnan_Mvl", + "llvm.ve.vl.pvfmkwge.MvMl" => "__builtin_ve_vl_pvfmkwge_MvMl", + "llvm.ve.vl.pvfmkwge.Mvl" => "__builtin_ve_vl_pvfmkwge_Mvl", + "llvm.ve.vl.pvfmkwgenan.MvMl" => "__builtin_ve_vl_pvfmkwgenan_MvMl", + "llvm.ve.vl.pvfmkwgenan.Mvl" => "__builtin_ve_vl_pvfmkwgenan_Mvl", + "llvm.ve.vl.pvfmkwgt.MvMl" => "__builtin_ve_vl_pvfmkwgt_MvMl", + "llvm.ve.vl.pvfmkwgt.Mvl" => "__builtin_ve_vl_pvfmkwgt_Mvl", + "llvm.ve.vl.pvfmkwgtnan.MvMl" => "__builtin_ve_vl_pvfmkwgtnan_MvMl", + "llvm.ve.vl.pvfmkwgtnan.Mvl" => "__builtin_ve_vl_pvfmkwgtnan_Mvl", + "llvm.ve.vl.pvfmkwle.MvMl" => "__builtin_ve_vl_pvfmkwle_MvMl", + "llvm.ve.vl.pvfmkwle.Mvl" => "__builtin_ve_vl_pvfmkwle_Mvl", + "llvm.ve.vl.pvfmkwlenan.MvMl" => "__builtin_ve_vl_pvfmkwlenan_MvMl", + "llvm.ve.vl.pvfmkwlenan.Mvl" => "__builtin_ve_vl_pvfmkwlenan_Mvl", + "llvm.ve.vl.pvfmkwloeq.mvl" => "__builtin_ve_vl_pvfmkwloeq_mvl", + "llvm.ve.vl.pvfmkwloeq.mvml" => "__builtin_ve_vl_pvfmkwloeq_mvml", + "llvm.ve.vl.pvfmkwloeqnan.mvl" => "__builtin_ve_vl_pvfmkwloeqnan_mvl", + "llvm.ve.vl.pvfmkwloeqnan.mvml" => "__builtin_ve_vl_pvfmkwloeqnan_mvml", + "llvm.ve.vl.pvfmkwloge.mvl" => "__builtin_ve_vl_pvfmkwloge_mvl", + "llvm.ve.vl.pvfmkwloge.mvml" => "__builtin_ve_vl_pvfmkwloge_mvml", + "llvm.ve.vl.pvfmkwlogenan.mvl" => "__builtin_ve_vl_pvfmkwlogenan_mvl", + "llvm.ve.vl.pvfmkwlogenan.mvml" => "__builtin_ve_vl_pvfmkwlogenan_mvml", + "llvm.ve.vl.pvfmkwlogt.mvl" => "__builtin_ve_vl_pvfmkwlogt_mvl", + "llvm.ve.vl.pvfmkwlogt.mvml" => "__builtin_ve_vl_pvfmkwlogt_mvml", + "llvm.ve.vl.pvfmkwlogtnan.mvl" => "__builtin_ve_vl_pvfmkwlogtnan_mvl", + "llvm.ve.vl.pvfmkwlogtnan.mvml" => "__builtin_ve_vl_pvfmkwlogtnan_mvml", + "llvm.ve.vl.pvfmkwlole.mvl" => "__builtin_ve_vl_pvfmkwlole_mvl", + "llvm.ve.vl.pvfmkwlole.mvml" => "__builtin_ve_vl_pvfmkwlole_mvml", + "llvm.ve.vl.pvfmkwlolenan.mvl" => "__builtin_ve_vl_pvfmkwlolenan_mvl", + "llvm.ve.vl.pvfmkwlolenan.mvml" => "__builtin_ve_vl_pvfmkwlolenan_mvml", + "llvm.ve.vl.pvfmkwlolt.mvl" => "__builtin_ve_vl_pvfmkwlolt_mvl", + "llvm.ve.vl.pvfmkwlolt.mvml" => "__builtin_ve_vl_pvfmkwlolt_mvml", + "llvm.ve.vl.pvfmkwloltnan.mvl" => "__builtin_ve_vl_pvfmkwloltnan_mvl", + "llvm.ve.vl.pvfmkwloltnan.mvml" => "__builtin_ve_vl_pvfmkwloltnan_mvml", + "llvm.ve.vl.pvfmkwlonan.mvl" => "__builtin_ve_vl_pvfmkwlonan_mvl", + "llvm.ve.vl.pvfmkwlonan.mvml" => "__builtin_ve_vl_pvfmkwlonan_mvml", + "llvm.ve.vl.pvfmkwlone.mvl" => "__builtin_ve_vl_pvfmkwlone_mvl", + "llvm.ve.vl.pvfmkwlone.mvml" => "__builtin_ve_vl_pvfmkwlone_mvml", + "llvm.ve.vl.pvfmkwlonenan.mvl" => "__builtin_ve_vl_pvfmkwlonenan_mvl", + "llvm.ve.vl.pvfmkwlonenan.mvml" => "__builtin_ve_vl_pvfmkwlonenan_mvml", + "llvm.ve.vl.pvfmkwlonum.mvl" => "__builtin_ve_vl_pvfmkwlonum_mvl", + "llvm.ve.vl.pvfmkwlonum.mvml" => "__builtin_ve_vl_pvfmkwlonum_mvml", + "llvm.ve.vl.pvfmkwlt.MvMl" => "__builtin_ve_vl_pvfmkwlt_MvMl", + "llvm.ve.vl.pvfmkwlt.Mvl" => "__builtin_ve_vl_pvfmkwlt_Mvl", + "llvm.ve.vl.pvfmkwltnan.MvMl" => "__builtin_ve_vl_pvfmkwltnan_MvMl", + "llvm.ve.vl.pvfmkwltnan.Mvl" => "__builtin_ve_vl_pvfmkwltnan_Mvl", + "llvm.ve.vl.pvfmkwnan.MvMl" => "__builtin_ve_vl_pvfmkwnan_MvMl", + "llvm.ve.vl.pvfmkwnan.Mvl" => "__builtin_ve_vl_pvfmkwnan_Mvl", + "llvm.ve.vl.pvfmkwne.MvMl" => 
"__builtin_ve_vl_pvfmkwne_MvMl", + "llvm.ve.vl.pvfmkwne.Mvl" => "__builtin_ve_vl_pvfmkwne_Mvl", + "llvm.ve.vl.pvfmkwnenan.MvMl" => "__builtin_ve_vl_pvfmkwnenan_MvMl", + "llvm.ve.vl.pvfmkwnenan.Mvl" => "__builtin_ve_vl_pvfmkwnenan_Mvl", + "llvm.ve.vl.pvfmkwnum.MvMl" => "__builtin_ve_vl_pvfmkwnum_MvMl", + "llvm.ve.vl.pvfmkwnum.Mvl" => "__builtin_ve_vl_pvfmkwnum_Mvl", + "llvm.ve.vl.pvfmkwupeq.mvl" => "__builtin_ve_vl_pvfmkwupeq_mvl", + "llvm.ve.vl.pvfmkwupeq.mvml" => "__builtin_ve_vl_pvfmkwupeq_mvml", + "llvm.ve.vl.pvfmkwupeqnan.mvl" => "__builtin_ve_vl_pvfmkwupeqnan_mvl", + "llvm.ve.vl.pvfmkwupeqnan.mvml" => "__builtin_ve_vl_pvfmkwupeqnan_mvml", + "llvm.ve.vl.pvfmkwupge.mvl" => "__builtin_ve_vl_pvfmkwupge_mvl", + "llvm.ve.vl.pvfmkwupge.mvml" => "__builtin_ve_vl_pvfmkwupge_mvml", + "llvm.ve.vl.pvfmkwupgenan.mvl" => "__builtin_ve_vl_pvfmkwupgenan_mvl", + "llvm.ve.vl.pvfmkwupgenan.mvml" => "__builtin_ve_vl_pvfmkwupgenan_mvml", + "llvm.ve.vl.pvfmkwupgt.mvl" => "__builtin_ve_vl_pvfmkwupgt_mvl", + "llvm.ve.vl.pvfmkwupgt.mvml" => "__builtin_ve_vl_pvfmkwupgt_mvml", + "llvm.ve.vl.pvfmkwupgtnan.mvl" => "__builtin_ve_vl_pvfmkwupgtnan_mvl", + "llvm.ve.vl.pvfmkwupgtnan.mvml" => "__builtin_ve_vl_pvfmkwupgtnan_mvml", + "llvm.ve.vl.pvfmkwuple.mvl" => "__builtin_ve_vl_pvfmkwuple_mvl", + "llvm.ve.vl.pvfmkwuple.mvml" => "__builtin_ve_vl_pvfmkwuple_mvml", + "llvm.ve.vl.pvfmkwuplenan.mvl" => "__builtin_ve_vl_pvfmkwuplenan_mvl", + "llvm.ve.vl.pvfmkwuplenan.mvml" => "__builtin_ve_vl_pvfmkwuplenan_mvml", + "llvm.ve.vl.pvfmkwuplt.mvl" => "__builtin_ve_vl_pvfmkwuplt_mvl", + "llvm.ve.vl.pvfmkwuplt.mvml" => "__builtin_ve_vl_pvfmkwuplt_mvml", + "llvm.ve.vl.pvfmkwupltnan.mvl" => "__builtin_ve_vl_pvfmkwupltnan_mvl", + "llvm.ve.vl.pvfmkwupltnan.mvml" => "__builtin_ve_vl_pvfmkwupltnan_mvml", + "llvm.ve.vl.pvfmkwupnan.mvl" => "__builtin_ve_vl_pvfmkwupnan_mvl", + "llvm.ve.vl.pvfmkwupnan.mvml" => "__builtin_ve_vl_pvfmkwupnan_mvml", + "llvm.ve.vl.pvfmkwupne.mvl" => "__builtin_ve_vl_pvfmkwupne_mvl", + "llvm.ve.vl.pvfmkwupne.mvml" => "__builtin_ve_vl_pvfmkwupne_mvml", + "llvm.ve.vl.pvfmkwupnenan.mvl" => "__builtin_ve_vl_pvfmkwupnenan_mvl", + "llvm.ve.vl.pvfmkwupnenan.mvml" => "__builtin_ve_vl_pvfmkwupnenan_mvml", + "llvm.ve.vl.pvfmkwupnum.mvl" => "__builtin_ve_vl_pvfmkwupnum_mvl", + "llvm.ve.vl.pvfmkwupnum.mvml" => "__builtin_ve_vl_pvfmkwupnum_mvml", + "llvm.ve.vl.pvfmsb.vsvvMvl" => "__builtin_ve_vl_pvfmsb_vsvvMvl", + "llvm.ve.vl.pvfmsb.vsvvl" => "__builtin_ve_vl_pvfmsb_vsvvl", + "llvm.ve.vl.pvfmsb.vsvvvl" => "__builtin_ve_vl_pvfmsb_vsvvvl", + "llvm.ve.vl.pvfmsb.vvsvMvl" => "__builtin_ve_vl_pvfmsb_vvsvMvl", + "llvm.ve.vl.pvfmsb.vvsvl" => "__builtin_ve_vl_pvfmsb_vvsvl", + "llvm.ve.vl.pvfmsb.vvsvvl" => "__builtin_ve_vl_pvfmsb_vvsvvl", + "llvm.ve.vl.pvfmsb.vvvvMvl" => "__builtin_ve_vl_pvfmsb_vvvvMvl", + "llvm.ve.vl.pvfmsb.vvvvl" => "__builtin_ve_vl_pvfmsb_vvvvl", + "llvm.ve.vl.pvfmsb.vvvvvl" => "__builtin_ve_vl_pvfmsb_vvvvvl", + "llvm.ve.vl.pvfmul.vsvMvl" => "__builtin_ve_vl_pvfmul_vsvMvl", + "llvm.ve.vl.pvfmul.vsvl" => "__builtin_ve_vl_pvfmul_vsvl", + "llvm.ve.vl.pvfmul.vsvvl" => "__builtin_ve_vl_pvfmul_vsvvl", + "llvm.ve.vl.pvfmul.vvvMvl" => "__builtin_ve_vl_pvfmul_vvvMvl", + "llvm.ve.vl.pvfmul.vvvl" => "__builtin_ve_vl_pvfmul_vvvl", + "llvm.ve.vl.pvfmul.vvvvl" => "__builtin_ve_vl_pvfmul_vvvvl", + "llvm.ve.vl.pvfnmad.vsvvMvl" => "__builtin_ve_vl_pvfnmad_vsvvMvl", + "llvm.ve.vl.pvfnmad.vsvvl" => "__builtin_ve_vl_pvfnmad_vsvvl", + "llvm.ve.vl.pvfnmad.vsvvvl" => "__builtin_ve_vl_pvfnmad_vsvvvl", + "llvm.ve.vl.pvfnmad.vvsvMvl" => 
"__builtin_ve_vl_pvfnmad_vvsvMvl", + "llvm.ve.vl.pvfnmad.vvsvl" => "__builtin_ve_vl_pvfnmad_vvsvl", + "llvm.ve.vl.pvfnmad.vvsvvl" => "__builtin_ve_vl_pvfnmad_vvsvvl", + "llvm.ve.vl.pvfnmad.vvvvMvl" => "__builtin_ve_vl_pvfnmad_vvvvMvl", + "llvm.ve.vl.pvfnmad.vvvvl" => "__builtin_ve_vl_pvfnmad_vvvvl", + "llvm.ve.vl.pvfnmad.vvvvvl" => "__builtin_ve_vl_pvfnmad_vvvvvl", + "llvm.ve.vl.pvfnmsb.vsvvMvl" => "__builtin_ve_vl_pvfnmsb_vsvvMvl", + "llvm.ve.vl.pvfnmsb.vsvvl" => "__builtin_ve_vl_pvfnmsb_vsvvl", + "llvm.ve.vl.pvfnmsb.vsvvvl" => "__builtin_ve_vl_pvfnmsb_vsvvvl", + "llvm.ve.vl.pvfnmsb.vvsvMvl" => "__builtin_ve_vl_pvfnmsb_vvsvMvl", + "llvm.ve.vl.pvfnmsb.vvsvl" => "__builtin_ve_vl_pvfnmsb_vvsvl", + "llvm.ve.vl.pvfnmsb.vvsvvl" => "__builtin_ve_vl_pvfnmsb_vvsvvl", + "llvm.ve.vl.pvfnmsb.vvvvMvl" => "__builtin_ve_vl_pvfnmsb_vvvvMvl", + "llvm.ve.vl.pvfnmsb.vvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvl", + "llvm.ve.vl.pvfnmsb.vvvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvvl", + "llvm.ve.vl.pvfsub.vsvMvl" => "__builtin_ve_vl_pvfsub_vsvMvl", + "llvm.ve.vl.pvfsub.vsvl" => "__builtin_ve_vl_pvfsub_vsvl", + "llvm.ve.vl.pvfsub.vsvvl" => "__builtin_ve_vl_pvfsub_vsvvl", + "llvm.ve.vl.pvfsub.vvvMvl" => "__builtin_ve_vl_pvfsub_vvvMvl", + "llvm.ve.vl.pvfsub.vvvl" => "__builtin_ve_vl_pvfsub_vvvl", + "llvm.ve.vl.pvfsub.vvvvl" => "__builtin_ve_vl_pvfsub_vvvvl", + "llvm.ve.vl.pvldz.vvMvl" => "__builtin_ve_vl_pvldz_vvMvl", + "llvm.ve.vl.pvldz.vvl" => "__builtin_ve_vl_pvldz_vvl", + "llvm.ve.vl.pvldz.vvvl" => "__builtin_ve_vl_pvldz_vvvl", + "llvm.ve.vl.pvldzlo.vvl" => "__builtin_ve_vl_pvldzlo_vvl", + "llvm.ve.vl.pvldzlo.vvmvl" => "__builtin_ve_vl_pvldzlo_vvmvl", + "llvm.ve.vl.pvldzlo.vvvl" => "__builtin_ve_vl_pvldzlo_vvvl", + "llvm.ve.vl.pvldzup.vvl" => "__builtin_ve_vl_pvldzup_vvl", + "llvm.ve.vl.pvldzup.vvmvl" => "__builtin_ve_vl_pvldzup_vvmvl", + "llvm.ve.vl.pvldzup.vvvl" => "__builtin_ve_vl_pvldzup_vvvl", + "llvm.ve.vl.pvmaxs.vsvMvl" => "__builtin_ve_vl_pvmaxs_vsvMvl", + "llvm.ve.vl.pvmaxs.vsvl" => "__builtin_ve_vl_pvmaxs_vsvl", + "llvm.ve.vl.pvmaxs.vsvvl" => "__builtin_ve_vl_pvmaxs_vsvvl", + "llvm.ve.vl.pvmaxs.vvvMvl" => "__builtin_ve_vl_pvmaxs_vvvMvl", + "llvm.ve.vl.pvmaxs.vvvl" => "__builtin_ve_vl_pvmaxs_vvvl", + "llvm.ve.vl.pvmaxs.vvvvl" => "__builtin_ve_vl_pvmaxs_vvvvl", + "llvm.ve.vl.pvmins.vsvMvl" => "__builtin_ve_vl_pvmins_vsvMvl", + "llvm.ve.vl.pvmins.vsvl" => "__builtin_ve_vl_pvmins_vsvl", + "llvm.ve.vl.pvmins.vsvvl" => "__builtin_ve_vl_pvmins_vsvvl", + "llvm.ve.vl.pvmins.vvvMvl" => "__builtin_ve_vl_pvmins_vvvMvl", + "llvm.ve.vl.pvmins.vvvl" => "__builtin_ve_vl_pvmins_vvvl", + "llvm.ve.vl.pvmins.vvvvl" => "__builtin_ve_vl_pvmins_vvvvl", + "llvm.ve.vl.pvor.vsvMvl" => "__builtin_ve_vl_pvor_vsvMvl", + "llvm.ve.vl.pvor.vsvl" => "__builtin_ve_vl_pvor_vsvl", + "llvm.ve.vl.pvor.vsvvl" => "__builtin_ve_vl_pvor_vsvvl", + "llvm.ve.vl.pvor.vvvMvl" => "__builtin_ve_vl_pvor_vvvMvl", + "llvm.ve.vl.pvor.vvvl" => "__builtin_ve_vl_pvor_vvvl", + "llvm.ve.vl.pvor.vvvvl" => "__builtin_ve_vl_pvor_vvvvl", + "llvm.ve.vl.pvpcnt.vvMvl" => "__builtin_ve_vl_pvpcnt_vvMvl", + "llvm.ve.vl.pvpcnt.vvl" => "__builtin_ve_vl_pvpcnt_vvl", + "llvm.ve.vl.pvpcnt.vvvl" => "__builtin_ve_vl_pvpcnt_vvvl", + "llvm.ve.vl.pvpcntlo.vvl" => "__builtin_ve_vl_pvpcntlo_vvl", + "llvm.ve.vl.pvpcntlo.vvmvl" => "__builtin_ve_vl_pvpcntlo_vvmvl", + "llvm.ve.vl.pvpcntlo.vvvl" => "__builtin_ve_vl_pvpcntlo_vvvl", + "llvm.ve.vl.pvpcntup.vvl" => "__builtin_ve_vl_pvpcntup_vvl", + "llvm.ve.vl.pvpcntup.vvmvl" => "__builtin_ve_vl_pvpcntup_vvmvl", + "llvm.ve.vl.pvpcntup.vvvl" => 
"__builtin_ve_vl_pvpcntup_vvvl", + "llvm.ve.vl.pvrcp.vvl" => "__builtin_ve_vl_pvrcp_vvl", + "llvm.ve.vl.pvrcp.vvvl" => "__builtin_ve_vl_pvrcp_vvvl", + "llvm.ve.vl.pvrsqrt.vvl" => "__builtin_ve_vl_pvrsqrt_vvl", + "llvm.ve.vl.pvrsqrt.vvvl" => "__builtin_ve_vl_pvrsqrt_vvvl", + "llvm.ve.vl.pvrsqrtnex.vvl" => "__builtin_ve_vl_pvrsqrtnex_vvl", + "llvm.ve.vl.pvrsqrtnex.vvvl" => "__builtin_ve_vl_pvrsqrtnex_vvvl", + "llvm.ve.vl.pvseq.vl" => "__builtin_ve_vl_pvseq_vl", + "llvm.ve.vl.pvseq.vvl" => "__builtin_ve_vl_pvseq_vvl", + "llvm.ve.vl.pvseqlo.vl" => "__builtin_ve_vl_pvseqlo_vl", + "llvm.ve.vl.pvseqlo.vvl" => "__builtin_ve_vl_pvseqlo_vvl", + "llvm.ve.vl.pvsequp.vl" => "__builtin_ve_vl_pvsequp_vl", + "llvm.ve.vl.pvsequp.vvl" => "__builtin_ve_vl_pvsequp_vvl", + "llvm.ve.vl.pvsla.vvsMvl" => "__builtin_ve_vl_pvsla_vvsMvl", + "llvm.ve.vl.pvsla.vvsl" => "__builtin_ve_vl_pvsla_vvsl", + "llvm.ve.vl.pvsla.vvsvl" => "__builtin_ve_vl_pvsla_vvsvl", + "llvm.ve.vl.pvsla.vvvMvl" => "__builtin_ve_vl_pvsla_vvvMvl", + "llvm.ve.vl.pvsla.vvvl" => "__builtin_ve_vl_pvsla_vvvl", + "llvm.ve.vl.pvsla.vvvvl" => "__builtin_ve_vl_pvsla_vvvvl", + "llvm.ve.vl.pvsll.vvsMvl" => "__builtin_ve_vl_pvsll_vvsMvl", + "llvm.ve.vl.pvsll.vvsl" => "__builtin_ve_vl_pvsll_vvsl", + "llvm.ve.vl.pvsll.vvsvl" => "__builtin_ve_vl_pvsll_vvsvl", + "llvm.ve.vl.pvsll.vvvMvl" => "__builtin_ve_vl_pvsll_vvvMvl", + "llvm.ve.vl.pvsll.vvvl" => "__builtin_ve_vl_pvsll_vvvl", + "llvm.ve.vl.pvsll.vvvvl" => "__builtin_ve_vl_pvsll_vvvvl", + "llvm.ve.vl.pvsra.vvsMvl" => "__builtin_ve_vl_pvsra_vvsMvl", + "llvm.ve.vl.pvsra.vvsl" => "__builtin_ve_vl_pvsra_vvsl", + "llvm.ve.vl.pvsra.vvsvl" => "__builtin_ve_vl_pvsra_vvsvl", + "llvm.ve.vl.pvsra.vvvMvl" => "__builtin_ve_vl_pvsra_vvvMvl", + "llvm.ve.vl.pvsra.vvvl" => "__builtin_ve_vl_pvsra_vvvl", + "llvm.ve.vl.pvsra.vvvvl" => "__builtin_ve_vl_pvsra_vvvvl", + "llvm.ve.vl.pvsrl.vvsMvl" => "__builtin_ve_vl_pvsrl_vvsMvl", + "llvm.ve.vl.pvsrl.vvsl" => "__builtin_ve_vl_pvsrl_vvsl", + "llvm.ve.vl.pvsrl.vvsvl" => "__builtin_ve_vl_pvsrl_vvsvl", + "llvm.ve.vl.pvsrl.vvvMvl" => "__builtin_ve_vl_pvsrl_vvvMvl", + "llvm.ve.vl.pvsrl.vvvl" => "__builtin_ve_vl_pvsrl_vvvl", + "llvm.ve.vl.pvsrl.vvvvl" => "__builtin_ve_vl_pvsrl_vvvvl", + "llvm.ve.vl.pvsubs.vsvMvl" => "__builtin_ve_vl_pvsubs_vsvMvl", + "llvm.ve.vl.pvsubs.vsvl" => "__builtin_ve_vl_pvsubs_vsvl", + "llvm.ve.vl.pvsubs.vsvvl" => "__builtin_ve_vl_pvsubs_vsvvl", + "llvm.ve.vl.pvsubs.vvvMvl" => "__builtin_ve_vl_pvsubs_vvvMvl", + "llvm.ve.vl.pvsubs.vvvl" => "__builtin_ve_vl_pvsubs_vvvl", + "llvm.ve.vl.pvsubs.vvvvl" => "__builtin_ve_vl_pvsubs_vvvvl", + "llvm.ve.vl.pvsubu.vsvMvl" => "__builtin_ve_vl_pvsubu_vsvMvl", + "llvm.ve.vl.pvsubu.vsvl" => "__builtin_ve_vl_pvsubu_vsvl", + "llvm.ve.vl.pvsubu.vsvvl" => "__builtin_ve_vl_pvsubu_vsvvl", + "llvm.ve.vl.pvsubu.vvvMvl" => "__builtin_ve_vl_pvsubu_vvvMvl", + "llvm.ve.vl.pvsubu.vvvl" => "__builtin_ve_vl_pvsubu_vvvl", + "llvm.ve.vl.pvsubu.vvvvl" => "__builtin_ve_vl_pvsubu_vvvvl", + "llvm.ve.vl.pvxor.vsvMvl" => "__builtin_ve_vl_pvxor_vsvMvl", + "llvm.ve.vl.pvxor.vsvl" => "__builtin_ve_vl_pvxor_vsvl", + "llvm.ve.vl.pvxor.vsvvl" => "__builtin_ve_vl_pvxor_vsvvl", + "llvm.ve.vl.pvxor.vvvMvl" => "__builtin_ve_vl_pvxor_vvvMvl", + "llvm.ve.vl.pvxor.vvvl" => "__builtin_ve_vl_pvxor_vvvl", + "llvm.ve.vl.pvxor.vvvvl" => "__builtin_ve_vl_pvxor_vvvvl", + "llvm.ve.vl.scr.sss" => "__builtin_ve_vl_scr_sss", + "llvm.ve.vl.svm.sMs" => "__builtin_ve_vl_svm_sMs", + "llvm.ve.vl.svm.sms" => "__builtin_ve_vl_svm_sms", + "llvm.ve.vl.svob" => "__builtin_ve_vl_svob", + 
"llvm.ve.vl.tovm.sml" => "__builtin_ve_vl_tovm_sml", + "llvm.ve.vl.tscr.ssss" => "__builtin_ve_vl_tscr_ssss", + "llvm.ve.vl.vaddsl.vsvl" => "__builtin_ve_vl_vaddsl_vsvl", + "llvm.ve.vl.vaddsl.vsvmvl" => "__builtin_ve_vl_vaddsl_vsvmvl", + "llvm.ve.vl.vaddsl.vsvvl" => "__builtin_ve_vl_vaddsl_vsvvl", + "llvm.ve.vl.vaddsl.vvvl" => "__builtin_ve_vl_vaddsl_vvvl", + "llvm.ve.vl.vaddsl.vvvmvl" => "__builtin_ve_vl_vaddsl_vvvmvl", + "llvm.ve.vl.vaddsl.vvvvl" => "__builtin_ve_vl_vaddsl_vvvvl", + "llvm.ve.vl.vaddswsx.vsvl" => "__builtin_ve_vl_vaddswsx_vsvl", + "llvm.ve.vl.vaddswsx.vsvmvl" => "__builtin_ve_vl_vaddswsx_vsvmvl", + "llvm.ve.vl.vaddswsx.vsvvl" => "__builtin_ve_vl_vaddswsx_vsvvl", + "llvm.ve.vl.vaddswsx.vvvl" => "__builtin_ve_vl_vaddswsx_vvvl", + "llvm.ve.vl.vaddswsx.vvvmvl" => "__builtin_ve_vl_vaddswsx_vvvmvl", + "llvm.ve.vl.vaddswsx.vvvvl" => "__builtin_ve_vl_vaddswsx_vvvvl", + "llvm.ve.vl.vaddswzx.vsvl" => "__builtin_ve_vl_vaddswzx_vsvl", + "llvm.ve.vl.vaddswzx.vsvmvl" => "__builtin_ve_vl_vaddswzx_vsvmvl", + "llvm.ve.vl.vaddswzx.vsvvl" => "__builtin_ve_vl_vaddswzx_vsvvl", + "llvm.ve.vl.vaddswzx.vvvl" => "__builtin_ve_vl_vaddswzx_vvvl", + "llvm.ve.vl.vaddswzx.vvvmvl" => "__builtin_ve_vl_vaddswzx_vvvmvl", + "llvm.ve.vl.vaddswzx.vvvvl" => "__builtin_ve_vl_vaddswzx_vvvvl", + "llvm.ve.vl.vaddul.vsvl" => "__builtin_ve_vl_vaddul_vsvl", + "llvm.ve.vl.vaddul.vsvmvl" => "__builtin_ve_vl_vaddul_vsvmvl", + "llvm.ve.vl.vaddul.vsvvl" => "__builtin_ve_vl_vaddul_vsvvl", + "llvm.ve.vl.vaddul.vvvl" => "__builtin_ve_vl_vaddul_vvvl", + "llvm.ve.vl.vaddul.vvvmvl" => "__builtin_ve_vl_vaddul_vvvmvl", + "llvm.ve.vl.vaddul.vvvvl" => "__builtin_ve_vl_vaddul_vvvvl", + "llvm.ve.vl.vadduw.vsvl" => "__builtin_ve_vl_vadduw_vsvl", + "llvm.ve.vl.vadduw.vsvmvl" => "__builtin_ve_vl_vadduw_vsvmvl", + "llvm.ve.vl.vadduw.vsvvl" => "__builtin_ve_vl_vadduw_vsvvl", + "llvm.ve.vl.vadduw.vvvl" => "__builtin_ve_vl_vadduw_vvvl", + "llvm.ve.vl.vadduw.vvvmvl" => "__builtin_ve_vl_vadduw_vvvmvl", + "llvm.ve.vl.vadduw.vvvvl" => "__builtin_ve_vl_vadduw_vvvvl", + "llvm.ve.vl.vand.vsvl" => "__builtin_ve_vl_vand_vsvl", + "llvm.ve.vl.vand.vsvmvl" => "__builtin_ve_vl_vand_vsvmvl", + "llvm.ve.vl.vand.vsvvl" => "__builtin_ve_vl_vand_vsvvl", + "llvm.ve.vl.vand.vvvl" => "__builtin_ve_vl_vand_vvvl", + "llvm.ve.vl.vand.vvvmvl" => "__builtin_ve_vl_vand_vvvmvl", + "llvm.ve.vl.vand.vvvvl" => "__builtin_ve_vl_vand_vvvvl", + "llvm.ve.vl.vbrdd.vsl" => "__builtin_ve_vl_vbrdd_vsl", + "llvm.ve.vl.vbrdd.vsmvl" => "__builtin_ve_vl_vbrdd_vsmvl", + "llvm.ve.vl.vbrdd.vsvl" => "__builtin_ve_vl_vbrdd_vsvl", + "llvm.ve.vl.vbrdl.vsl" => "__builtin_ve_vl_vbrdl_vsl", + "llvm.ve.vl.vbrdl.vsmvl" => "__builtin_ve_vl_vbrdl_vsmvl", + "llvm.ve.vl.vbrdl.vsvl" => "__builtin_ve_vl_vbrdl_vsvl", + "llvm.ve.vl.vbrds.vsl" => "__builtin_ve_vl_vbrds_vsl", + "llvm.ve.vl.vbrds.vsmvl" => "__builtin_ve_vl_vbrds_vsmvl", + "llvm.ve.vl.vbrds.vsvl" => "__builtin_ve_vl_vbrds_vsvl", + "llvm.ve.vl.vbrdw.vsl" => "__builtin_ve_vl_vbrdw_vsl", + "llvm.ve.vl.vbrdw.vsmvl" => "__builtin_ve_vl_vbrdw_vsmvl", + "llvm.ve.vl.vbrdw.vsvl" => "__builtin_ve_vl_vbrdw_vsvl", + "llvm.ve.vl.vbrv.vvl" => "__builtin_ve_vl_vbrv_vvl", + "llvm.ve.vl.vbrv.vvmvl" => "__builtin_ve_vl_vbrv_vvmvl", + "llvm.ve.vl.vbrv.vvvl" => "__builtin_ve_vl_vbrv_vvvl", + "llvm.ve.vl.vcmpsl.vsvl" => "__builtin_ve_vl_vcmpsl_vsvl", + "llvm.ve.vl.vcmpsl.vsvmvl" => "__builtin_ve_vl_vcmpsl_vsvmvl", + "llvm.ve.vl.vcmpsl.vsvvl" => "__builtin_ve_vl_vcmpsl_vsvvl", + "llvm.ve.vl.vcmpsl.vvvl" => "__builtin_ve_vl_vcmpsl_vvvl", + 
"llvm.ve.vl.vcmpsl.vvvmvl" => "__builtin_ve_vl_vcmpsl_vvvmvl", + "llvm.ve.vl.vcmpsl.vvvvl" => "__builtin_ve_vl_vcmpsl_vvvvl", + "llvm.ve.vl.vcmpswsx.vsvl" => "__builtin_ve_vl_vcmpswsx_vsvl", + "llvm.ve.vl.vcmpswsx.vsvmvl" => "__builtin_ve_vl_vcmpswsx_vsvmvl", + "llvm.ve.vl.vcmpswsx.vsvvl" => "__builtin_ve_vl_vcmpswsx_vsvvl", + "llvm.ve.vl.vcmpswsx.vvvl" => "__builtin_ve_vl_vcmpswsx_vvvl", + "llvm.ve.vl.vcmpswsx.vvvmvl" => "__builtin_ve_vl_vcmpswsx_vvvmvl", + "llvm.ve.vl.vcmpswsx.vvvvl" => "__builtin_ve_vl_vcmpswsx_vvvvl", + "llvm.ve.vl.vcmpswzx.vsvl" => "__builtin_ve_vl_vcmpswzx_vsvl", + "llvm.ve.vl.vcmpswzx.vsvmvl" => "__builtin_ve_vl_vcmpswzx_vsvmvl", + "llvm.ve.vl.vcmpswzx.vsvvl" => "__builtin_ve_vl_vcmpswzx_vsvvl", + "llvm.ve.vl.vcmpswzx.vvvl" => "__builtin_ve_vl_vcmpswzx_vvvl", + "llvm.ve.vl.vcmpswzx.vvvmvl" => "__builtin_ve_vl_vcmpswzx_vvvmvl", + "llvm.ve.vl.vcmpswzx.vvvvl" => "__builtin_ve_vl_vcmpswzx_vvvvl", + "llvm.ve.vl.vcmpul.vsvl" => "__builtin_ve_vl_vcmpul_vsvl", + "llvm.ve.vl.vcmpul.vsvmvl" => "__builtin_ve_vl_vcmpul_vsvmvl", + "llvm.ve.vl.vcmpul.vsvvl" => "__builtin_ve_vl_vcmpul_vsvvl", + "llvm.ve.vl.vcmpul.vvvl" => "__builtin_ve_vl_vcmpul_vvvl", + "llvm.ve.vl.vcmpul.vvvmvl" => "__builtin_ve_vl_vcmpul_vvvmvl", + "llvm.ve.vl.vcmpul.vvvvl" => "__builtin_ve_vl_vcmpul_vvvvl", + "llvm.ve.vl.vcmpuw.vsvl" => "__builtin_ve_vl_vcmpuw_vsvl", + "llvm.ve.vl.vcmpuw.vsvmvl" => "__builtin_ve_vl_vcmpuw_vsvmvl", + "llvm.ve.vl.vcmpuw.vsvvl" => "__builtin_ve_vl_vcmpuw_vsvvl", + "llvm.ve.vl.vcmpuw.vvvl" => "__builtin_ve_vl_vcmpuw_vvvl", + "llvm.ve.vl.vcmpuw.vvvmvl" => "__builtin_ve_vl_vcmpuw_vvvmvl", + "llvm.ve.vl.vcmpuw.vvvvl" => "__builtin_ve_vl_vcmpuw_vvvvl", + "llvm.ve.vl.vcp.vvmvl" => "__builtin_ve_vl_vcp_vvmvl", + "llvm.ve.vl.vcvtdl.vvl" => "__builtin_ve_vl_vcvtdl_vvl", + "llvm.ve.vl.vcvtdl.vvvl" => "__builtin_ve_vl_vcvtdl_vvvl", + "llvm.ve.vl.vcvtds.vvl" => "__builtin_ve_vl_vcvtds_vvl", + "llvm.ve.vl.vcvtds.vvvl" => "__builtin_ve_vl_vcvtds_vvvl", + "llvm.ve.vl.vcvtdw.vvl" => "__builtin_ve_vl_vcvtdw_vvl", + "llvm.ve.vl.vcvtdw.vvvl" => "__builtin_ve_vl_vcvtdw_vvvl", + "llvm.ve.vl.vcvtld.vvl" => "__builtin_ve_vl_vcvtld_vvl", + "llvm.ve.vl.vcvtld.vvmvl" => "__builtin_ve_vl_vcvtld_vvmvl", + "llvm.ve.vl.vcvtld.vvvl" => "__builtin_ve_vl_vcvtld_vvvl", + "llvm.ve.vl.vcvtldrz.vvl" => "__builtin_ve_vl_vcvtldrz_vvl", + "llvm.ve.vl.vcvtldrz.vvmvl" => "__builtin_ve_vl_vcvtldrz_vvmvl", + "llvm.ve.vl.vcvtldrz.vvvl" => "__builtin_ve_vl_vcvtldrz_vvvl", + "llvm.ve.vl.vcvtsd.vvl" => "__builtin_ve_vl_vcvtsd_vvl", + "llvm.ve.vl.vcvtsd.vvvl" => "__builtin_ve_vl_vcvtsd_vvvl", + "llvm.ve.vl.vcvtsw.vvl" => "__builtin_ve_vl_vcvtsw_vvl", + "llvm.ve.vl.vcvtsw.vvvl" => "__builtin_ve_vl_vcvtsw_vvvl", + "llvm.ve.vl.vcvtwdsx.vvl" => "__builtin_ve_vl_vcvtwdsx_vvl", + "llvm.ve.vl.vcvtwdsx.vvmvl" => "__builtin_ve_vl_vcvtwdsx_vvmvl", + "llvm.ve.vl.vcvtwdsx.vvvl" => "__builtin_ve_vl_vcvtwdsx_vvvl", + "llvm.ve.vl.vcvtwdsxrz.vvl" => "__builtin_ve_vl_vcvtwdsxrz_vvl", + "llvm.ve.vl.vcvtwdsxrz.vvmvl" => "__builtin_ve_vl_vcvtwdsxrz_vvmvl", + "llvm.ve.vl.vcvtwdsxrz.vvvl" => "__builtin_ve_vl_vcvtwdsxrz_vvvl", + "llvm.ve.vl.vcvtwdzx.vvl" => "__builtin_ve_vl_vcvtwdzx_vvl", + "llvm.ve.vl.vcvtwdzx.vvmvl" => "__builtin_ve_vl_vcvtwdzx_vvmvl", + "llvm.ve.vl.vcvtwdzx.vvvl" => "__builtin_ve_vl_vcvtwdzx_vvvl", + "llvm.ve.vl.vcvtwdzxrz.vvl" => "__builtin_ve_vl_vcvtwdzxrz_vvl", + "llvm.ve.vl.vcvtwdzxrz.vvmvl" => "__builtin_ve_vl_vcvtwdzxrz_vvmvl", + "llvm.ve.vl.vcvtwdzxrz.vvvl" => "__builtin_ve_vl_vcvtwdzxrz_vvvl", + "llvm.ve.vl.vcvtwssx.vvl" 
=> "__builtin_ve_vl_vcvtwssx_vvl", + "llvm.ve.vl.vcvtwssx.vvmvl" => "__builtin_ve_vl_vcvtwssx_vvmvl", + "llvm.ve.vl.vcvtwssx.vvvl" => "__builtin_ve_vl_vcvtwssx_vvvl", + "llvm.ve.vl.vcvtwssxrz.vvl" => "__builtin_ve_vl_vcvtwssxrz_vvl", + "llvm.ve.vl.vcvtwssxrz.vvmvl" => "__builtin_ve_vl_vcvtwssxrz_vvmvl", + "llvm.ve.vl.vcvtwssxrz.vvvl" => "__builtin_ve_vl_vcvtwssxrz_vvvl", + "llvm.ve.vl.vcvtwszx.vvl" => "__builtin_ve_vl_vcvtwszx_vvl", + "llvm.ve.vl.vcvtwszx.vvmvl" => "__builtin_ve_vl_vcvtwszx_vvmvl", + "llvm.ve.vl.vcvtwszx.vvvl" => "__builtin_ve_vl_vcvtwszx_vvvl", + "llvm.ve.vl.vcvtwszxrz.vvl" => "__builtin_ve_vl_vcvtwszxrz_vvl", + "llvm.ve.vl.vcvtwszxrz.vvmvl" => "__builtin_ve_vl_vcvtwszxrz_vvmvl", + "llvm.ve.vl.vcvtwszxrz.vvvl" => "__builtin_ve_vl_vcvtwszxrz_vvvl", + "llvm.ve.vl.vdivsl.vsvl" => "__builtin_ve_vl_vdivsl_vsvl", + "llvm.ve.vl.vdivsl.vsvmvl" => "__builtin_ve_vl_vdivsl_vsvmvl", + "llvm.ve.vl.vdivsl.vsvvl" => "__builtin_ve_vl_vdivsl_vsvvl", + "llvm.ve.vl.vdivsl.vvsl" => "__builtin_ve_vl_vdivsl_vvsl", + "llvm.ve.vl.vdivsl.vvsmvl" => "__builtin_ve_vl_vdivsl_vvsmvl", + "llvm.ve.vl.vdivsl.vvsvl" => "__builtin_ve_vl_vdivsl_vvsvl", + "llvm.ve.vl.vdivsl.vvvl" => "__builtin_ve_vl_vdivsl_vvvl", + "llvm.ve.vl.vdivsl.vvvmvl" => "__builtin_ve_vl_vdivsl_vvvmvl", + "llvm.ve.vl.vdivsl.vvvvl" => "__builtin_ve_vl_vdivsl_vvvvl", + "llvm.ve.vl.vdivswsx.vsvl" => "__builtin_ve_vl_vdivswsx_vsvl", + "llvm.ve.vl.vdivswsx.vsvmvl" => "__builtin_ve_vl_vdivswsx_vsvmvl", + "llvm.ve.vl.vdivswsx.vsvvl" => "__builtin_ve_vl_vdivswsx_vsvvl", + "llvm.ve.vl.vdivswsx.vvsl" => "__builtin_ve_vl_vdivswsx_vvsl", + "llvm.ve.vl.vdivswsx.vvsmvl" => "__builtin_ve_vl_vdivswsx_vvsmvl", + "llvm.ve.vl.vdivswsx.vvsvl" => "__builtin_ve_vl_vdivswsx_vvsvl", + "llvm.ve.vl.vdivswsx.vvvl" => "__builtin_ve_vl_vdivswsx_vvvl", + "llvm.ve.vl.vdivswsx.vvvmvl" => "__builtin_ve_vl_vdivswsx_vvvmvl", + "llvm.ve.vl.vdivswsx.vvvvl" => "__builtin_ve_vl_vdivswsx_vvvvl", + "llvm.ve.vl.vdivswzx.vsvl" => "__builtin_ve_vl_vdivswzx_vsvl", + "llvm.ve.vl.vdivswzx.vsvmvl" => "__builtin_ve_vl_vdivswzx_vsvmvl", + "llvm.ve.vl.vdivswzx.vsvvl" => "__builtin_ve_vl_vdivswzx_vsvvl", + "llvm.ve.vl.vdivswzx.vvsl" => "__builtin_ve_vl_vdivswzx_vvsl", + "llvm.ve.vl.vdivswzx.vvsmvl" => "__builtin_ve_vl_vdivswzx_vvsmvl", + "llvm.ve.vl.vdivswzx.vvsvl" => "__builtin_ve_vl_vdivswzx_vvsvl", + "llvm.ve.vl.vdivswzx.vvvl" => "__builtin_ve_vl_vdivswzx_vvvl", + "llvm.ve.vl.vdivswzx.vvvmvl" => "__builtin_ve_vl_vdivswzx_vvvmvl", + "llvm.ve.vl.vdivswzx.vvvvl" => "__builtin_ve_vl_vdivswzx_vvvvl", + "llvm.ve.vl.vdivul.vsvl" => "__builtin_ve_vl_vdivul_vsvl", + "llvm.ve.vl.vdivul.vsvmvl" => "__builtin_ve_vl_vdivul_vsvmvl", + "llvm.ve.vl.vdivul.vsvvl" => "__builtin_ve_vl_vdivul_vsvvl", + "llvm.ve.vl.vdivul.vvsl" => "__builtin_ve_vl_vdivul_vvsl", + "llvm.ve.vl.vdivul.vvsmvl" => "__builtin_ve_vl_vdivul_vvsmvl", + "llvm.ve.vl.vdivul.vvsvl" => "__builtin_ve_vl_vdivul_vvsvl", + "llvm.ve.vl.vdivul.vvvl" => "__builtin_ve_vl_vdivul_vvvl", + "llvm.ve.vl.vdivul.vvvmvl" => "__builtin_ve_vl_vdivul_vvvmvl", + "llvm.ve.vl.vdivul.vvvvl" => "__builtin_ve_vl_vdivul_vvvvl", + "llvm.ve.vl.vdivuw.vsvl" => "__builtin_ve_vl_vdivuw_vsvl", + "llvm.ve.vl.vdivuw.vsvmvl" => "__builtin_ve_vl_vdivuw_vsvmvl", + "llvm.ve.vl.vdivuw.vsvvl" => "__builtin_ve_vl_vdivuw_vsvvl", + "llvm.ve.vl.vdivuw.vvsl" => "__builtin_ve_vl_vdivuw_vvsl", + "llvm.ve.vl.vdivuw.vvsmvl" => "__builtin_ve_vl_vdivuw_vvsmvl", + "llvm.ve.vl.vdivuw.vvsvl" => "__builtin_ve_vl_vdivuw_vvsvl", + "llvm.ve.vl.vdivuw.vvvl" => "__builtin_ve_vl_vdivuw_vvvl", 
+ "llvm.ve.vl.vdivuw.vvvmvl" => "__builtin_ve_vl_vdivuw_vvvmvl", + "llvm.ve.vl.vdivuw.vvvvl" => "__builtin_ve_vl_vdivuw_vvvvl", + "llvm.ve.vl.veqv.vsvl" => "__builtin_ve_vl_veqv_vsvl", + "llvm.ve.vl.veqv.vsvmvl" => "__builtin_ve_vl_veqv_vsvmvl", + "llvm.ve.vl.veqv.vsvvl" => "__builtin_ve_vl_veqv_vsvvl", + "llvm.ve.vl.veqv.vvvl" => "__builtin_ve_vl_veqv_vvvl", + "llvm.ve.vl.veqv.vvvmvl" => "__builtin_ve_vl_veqv_vvvmvl", + "llvm.ve.vl.veqv.vvvvl" => "__builtin_ve_vl_veqv_vvvvl", + "llvm.ve.vl.vex.vvmvl" => "__builtin_ve_vl_vex_vvmvl", + "llvm.ve.vl.vfaddd.vsvl" => "__builtin_ve_vl_vfaddd_vsvl", + "llvm.ve.vl.vfaddd.vsvmvl" => "__builtin_ve_vl_vfaddd_vsvmvl", + "llvm.ve.vl.vfaddd.vsvvl" => "__builtin_ve_vl_vfaddd_vsvvl", + "llvm.ve.vl.vfaddd.vvvl" => "__builtin_ve_vl_vfaddd_vvvl", + "llvm.ve.vl.vfaddd.vvvmvl" => "__builtin_ve_vl_vfaddd_vvvmvl", + "llvm.ve.vl.vfaddd.vvvvl" => "__builtin_ve_vl_vfaddd_vvvvl", + "llvm.ve.vl.vfadds.vsvl" => "__builtin_ve_vl_vfadds_vsvl", + "llvm.ve.vl.vfadds.vsvmvl" => "__builtin_ve_vl_vfadds_vsvmvl", + "llvm.ve.vl.vfadds.vsvvl" => "__builtin_ve_vl_vfadds_vsvvl", + "llvm.ve.vl.vfadds.vvvl" => "__builtin_ve_vl_vfadds_vvvl", + "llvm.ve.vl.vfadds.vvvmvl" => "__builtin_ve_vl_vfadds_vvvmvl", + "llvm.ve.vl.vfadds.vvvvl" => "__builtin_ve_vl_vfadds_vvvvl", + "llvm.ve.vl.vfcmpd.vsvl" => "__builtin_ve_vl_vfcmpd_vsvl", + "llvm.ve.vl.vfcmpd.vsvmvl" => "__builtin_ve_vl_vfcmpd_vsvmvl", + "llvm.ve.vl.vfcmpd.vsvvl" => "__builtin_ve_vl_vfcmpd_vsvvl", + "llvm.ve.vl.vfcmpd.vvvl" => "__builtin_ve_vl_vfcmpd_vvvl", + "llvm.ve.vl.vfcmpd.vvvmvl" => "__builtin_ve_vl_vfcmpd_vvvmvl", + "llvm.ve.vl.vfcmpd.vvvvl" => "__builtin_ve_vl_vfcmpd_vvvvl", + "llvm.ve.vl.vfcmps.vsvl" => "__builtin_ve_vl_vfcmps_vsvl", + "llvm.ve.vl.vfcmps.vsvmvl" => "__builtin_ve_vl_vfcmps_vsvmvl", + "llvm.ve.vl.vfcmps.vsvvl" => "__builtin_ve_vl_vfcmps_vsvvl", + "llvm.ve.vl.vfcmps.vvvl" => "__builtin_ve_vl_vfcmps_vvvl", + "llvm.ve.vl.vfcmps.vvvmvl" => "__builtin_ve_vl_vfcmps_vvvmvl", + "llvm.ve.vl.vfcmps.vvvvl" => "__builtin_ve_vl_vfcmps_vvvvl", + "llvm.ve.vl.vfdivd.vsvl" => "__builtin_ve_vl_vfdivd_vsvl", + "llvm.ve.vl.vfdivd.vsvmvl" => "__builtin_ve_vl_vfdivd_vsvmvl", + "llvm.ve.vl.vfdivd.vsvvl" => "__builtin_ve_vl_vfdivd_vsvvl", + "llvm.ve.vl.vfdivd.vvvl" => "__builtin_ve_vl_vfdivd_vvvl", + "llvm.ve.vl.vfdivd.vvvmvl" => "__builtin_ve_vl_vfdivd_vvvmvl", + "llvm.ve.vl.vfdivd.vvvvl" => "__builtin_ve_vl_vfdivd_vvvvl", + "llvm.ve.vl.vfdivs.vsvl" => "__builtin_ve_vl_vfdivs_vsvl", + "llvm.ve.vl.vfdivs.vsvmvl" => "__builtin_ve_vl_vfdivs_vsvmvl", + "llvm.ve.vl.vfdivs.vsvvl" => "__builtin_ve_vl_vfdivs_vsvvl", + "llvm.ve.vl.vfdivs.vvvl" => "__builtin_ve_vl_vfdivs_vvvl", + "llvm.ve.vl.vfdivs.vvvmvl" => "__builtin_ve_vl_vfdivs_vvvmvl", + "llvm.ve.vl.vfdivs.vvvvl" => "__builtin_ve_vl_vfdivs_vvvvl", + "llvm.ve.vl.vfmadd.vsvvl" => "__builtin_ve_vl_vfmadd_vsvvl", + "llvm.ve.vl.vfmadd.vsvvmvl" => "__builtin_ve_vl_vfmadd_vsvvmvl", + "llvm.ve.vl.vfmadd.vsvvvl" => "__builtin_ve_vl_vfmadd_vsvvvl", + "llvm.ve.vl.vfmadd.vvsvl" => "__builtin_ve_vl_vfmadd_vvsvl", + "llvm.ve.vl.vfmadd.vvsvmvl" => "__builtin_ve_vl_vfmadd_vvsvmvl", + "llvm.ve.vl.vfmadd.vvsvvl" => "__builtin_ve_vl_vfmadd_vvsvvl", + "llvm.ve.vl.vfmadd.vvvvl" => "__builtin_ve_vl_vfmadd_vvvvl", + "llvm.ve.vl.vfmadd.vvvvmvl" => "__builtin_ve_vl_vfmadd_vvvvmvl", + "llvm.ve.vl.vfmadd.vvvvvl" => "__builtin_ve_vl_vfmadd_vvvvvl", + "llvm.ve.vl.vfmads.vsvvl" => "__builtin_ve_vl_vfmads_vsvvl", + "llvm.ve.vl.vfmads.vsvvmvl" => "__builtin_ve_vl_vfmads_vsvvmvl", + "llvm.ve.vl.vfmads.vsvvvl" => 
"__builtin_ve_vl_vfmads_vsvvvl", + "llvm.ve.vl.vfmads.vvsvl" => "__builtin_ve_vl_vfmads_vvsvl", + "llvm.ve.vl.vfmads.vvsvmvl" => "__builtin_ve_vl_vfmads_vvsvmvl", + "llvm.ve.vl.vfmads.vvsvvl" => "__builtin_ve_vl_vfmads_vvsvvl", + "llvm.ve.vl.vfmads.vvvvl" => "__builtin_ve_vl_vfmads_vvvvl", + "llvm.ve.vl.vfmads.vvvvmvl" => "__builtin_ve_vl_vfmads_vvvvmvl", + "llvm.ve.vl.vfmads.vvvvvl" => "__builtin_ve_vl_vfmads_vvvvvl", + "llvm.ve.vl.vfmaxd.vsvl" => "__builtin_ve_vl_vfmaxd_vsvl", + "llvm.ve.vl.vfmaxd.vsvmvl" => "__builtin_ve_vl_vfmaxd_vsvmvl", + "llvm.ve.vl.vfmaxd.vsvvl" => "__builtin_ve_vl_vfmaxd_vsvvl", + "llvm.ve.vl.vfmaxd.vvvl" => "__builtin_ve_vl_vfmaxd_vvvl", + "llvm.ve.vl.vfmaxd.vvvmvl" => "__builtin_ve_vl_vfmaxd_vvvmvl", + "llvm.ve.vl.vfmaxd.vvvvl" => "__builtin_ve_vl_vfmaxd_vvvvl", + "llvm.ve.vl.vfmaxs.vsvl" => "__builtin_ve_vl_vfmaxs_vsvl", + "llvm.ve.vl.vfmaxs.vsvmvl" => "__builtin_ve_vl_vfmaxs_vsvmvl", + "llvm.ve.vl.vfmaxs.vsvvl" => "__builtin_ve_vl_vfmaxs_vsvvl", + "llvm.ve.vl.vfmaxs.vvvl" => "__builtin_ve_vl_vfmaxs_vvvl", + "llvm.ve.vl.vfmaxs.vvvmvl" => "__builtin_ve_vl_vfmaxs_vvvmvl", + "llvm.ve.vl.vfmaxs.vvvvl" => "__builtin_ve_vl_vfmaxs_vvvvl", + "llvm.ve.vl.vfmind.vsvl" => "__builtin_ve_vl_vfmind_vsvl", + "llvm.ve.vl.vfmind.vsvmvl" => "__builtin_ve_vl_vfmind_vsvmvl", + "llvm.ve.vl.vfmind.vsvvl" => "__builtin_ve_vl_vfmind_vsvvl", + "llvm.ve.vl.vfmind.vvvl" => "__builtin_ve_vl_vfmind_vvvl", + "llvm.ve.vl.vfmind.vvvmvl" => "__builtin_ve_vl_vfmind_vvvmvl", + "llvm.ve.vl.vfmind.vvvvl" => "__builtin_ve_vl_vfmind_vvvvl", + "llvm.ve.vl.vfmins.vsvl" => "__builtin_ve_vl_vfmins_vsvl", + "llvm.ve.vl.vfmins.vsvmvl" => "__builtin_ve_vl_vfmins_vsvmvl", + "llvm.ve.vl.vfmins.vsvvl" => "__builtin_ve_vl_vfmins_vsvvl", + "llvm.ve.vl.vfmins.vvvl" => "__builtin_ve_vl_vfmins_vvvl", + "llvm.ve.vl.vfmins.vvvmvl" => "__builtin_ve_vl_vfmins_vvvmvl", + "llvm.ve.vl.vfmins.vvvvl" => "__builtin_ve_vl_vfmins_vvvvl", + "llvm.ve.vl.vfmkdeq.mvl" => "__builtin_ve_vl_vfmkdeq_mvl", + "llvm.ve.vl.vfmkdeq.mvml" => "__builtin_ve_vl_vfmkdeq_mvml", + "llvm.ve.vl.vfmkdeqnan.mvl" => "__builtin_ve_vl_vfmkdeqnan_mvl", + "llvm.ve.vl.vfmkdeqnan.mvml" => "__builtin_ve_vl_vfmkdeqnan_mvml", + "llvm.ve.vl.vfmkdge.mvl" => "__builtin_ve_vl_vfmkdge_mvl", + "llvm.ve.vl.vfmkdge.mvml" => "__builtin_ve_vl_vfmkdge_mvml", + "llvm.ve.vl.vfmkdgenan.mvl" => "__builtin_ve_vl_vfmkdgenan_mvl", + "llvm.ve.vl.vfmkdgenan.mvml" => "__builtin_ve_vl_vfmkdgenan_mvml", + "llvm.ve.vl.vfmkdgt.mvl" => "__builtin_ve_vl_vfmkdgt_mvl", + "llvm.ve.vl.vfmkdgt.mvml" => "__builtin_ve_vl_vfmkdgt_mvml", + "llvm.ve.vl.vfmkdgtnan.mvl" => "__builtin_ve_vl_vfmkdgtnan_mvl", + "llvm.ve.vl.vfmkdgtnan.mvml" => "__builtin_ve_vl_vfmkdgtnan_mvml", + "llvm.ve.vl.vfmkdle.mvl" => "__builtin_ve_vl_vfmkdle_mvl", + "llvm.ve.vl.vfmkdle.mvml" => "__builtin_ve_vl_vfmkdle_mvml", + "llvm.ve.vl.vfmkdlenan.mvl" => "__builtin_ve_vl_vfmkdlenan_mvl", + "llvm.ve.vl.vfmkdlenan.mvml" => "__builtin_ve_vl_vfmkdlenan_mvml", + "llvm.ve.vl.vfmkdlt.mvl" => "__builtin_ve_vl_vfmkdlt_mvl", + "llvm.ve.vl.vfmkdlt.mvml" => "__builtin_ve_vl_vfmkdlt_mvml", + "llvm.ve.vl.vfmkdltnan.mvl" => "__builtin_ve_vl_vfmkdltnan_mvl", + "llvm.ve.vl.vfmkdltnan.mvml" => "__builtin_ve_vl_vfmkdltnan_mvml", + "llvm.ve.vl.vfmkdnan.mvl" => "__builtin_ve_vl_vfmkdnan_mvl", + "llvm.ve.vl.vfmkdnan.mvml" => "__builtin_ve_vl_vfmkdnan_mvml", + "llvm.ve.vl.vfmkdne.mvl" => "__builtin_ve_vl_vfmkdne_mvl", + "llvm.ve.vl.vfmkdne.mvml" => "__builtin_ve_vl_vfmkdne_mvml", + "llvm.ve.vl.vfmkdnenan.mvl" => "__builtin_ve_vl_vfmkdnenan_mvl", 
+ "llvm.ve.vl.vfmkdnenan.mvml" => "__builtin_ve_vl_vfmkdnenan_mvml", + "llvm.ve.vl.vfmkdnum.mvl" => "__builtin_ve_vl_vfmkdnum_mvl", + "llvm.ve.vl.vfmkdnum.mvml" => "__builtin_ve_vl_vfmkdnum_mvml", + "llvm.ve.vl.vfmklaf.ml" => "__builtin_ve_vl_vfmklaf_ml", + "llvm.ve.vl.vfmklat.ml" => "__builtin_ve_vl_vfmklat_ml", + "llvm.ve.vl.vfmkleq.mvl" => "__builtin_ve_vl_vfmkleq_mvl", + "llvm.ve.vl.vfmkleq.mvml" => "__builtin_ve_vl_vfmkleq_mvml", + "llvm.ve.vl.vfmkleqnan.mvl" => "__builtin_ve_vl_vfmkleqnan_mvl", + "llvm.ve.vl.vfmkleqnan.mvml" => "__builtin_ve_vl_vfmkleqnan_mvml", + "llvm.ve.vl.vfmklge.mvl" => "__builtin_ve_vl_vfmklge_mvl", + "llvm.ve.vl.vfmklge.mvml" => "__builtin_ve_vl_vfmklge_mvml", + "llvm.ve.vl.vfmklgenan.mvl" => "__builtin_ve_vl_vfmklgenan_mvl", + "llvm.ve.vl.vfmklgenan.mvml" => "__builtin_ve_vl_vfmklgenan_mvml", + "llvm.ve.vl.vfmklgt.mvl" => "__builtin_ve_vl_vfmklgt_mvl", + "llvm.ve.vl.vfmklgt.mvml" => "__builtin_ve_vl_vfmklgt_mvml", + "llvm.ve.vl.vfmklgtnan.mvl" => "__builtin_ve_vl_vfmklgtnan_mvl", + "llvm.ve.vl.vfmklgtnan.mvml" => "__builtin_ve_vl_vfmklgtnan_mvml", + "llvm.ve.vl.vfmklle.mvl" => "__builtin_ve_vl_vfmklle_mvl", + "llvm.ve.vl.vfmklle.mvml" => "__builtin_ve_vl_vfmklle_mvml", + "llvm.ve.vl.vfmkllenan.mvl" => "__builtin_ve_vl_vfmkllenan_mvl", + "llvm.ve.vl.vfmkllenan.mvml" => "__builtin_ve_vl_vfmkllenan_mvml", + "llvm.ve.vl.vfmkllt.mvl" => "__builtin_ve_vl_vfmkllt_mvl", + "llvm.ve.vl.vfmkllt.mvml" => "__builtin_ve_vl_vfmkllt_mvml", + "llvm.ve.vl.vfmklltnan.mvl" => "__builtin_ve_vl_vfmklltnan_mvl", + "llvm.ve.vl.vfmklltnan.mvml" => "__builtin_ve_vl_vfmklltnan_mvml", + "llvm.ve.vl.vfmklnan.mvl" => "__builtin_ve_vl_vfmklnan_mvl", + "llvm.ve.vl.vfmklnan.mvml" => "__builtin_ve_vl_vfmklnan_mvml", + "llvm.ve.vl.vfmklne.mvl" => "__builtin_ve_vl_vfmklne_mvl", + "llvm.ve.vl.vfmklne.mvml" => "__builtin_ve_vl_vfmklne_mvml", + "llvm.ve.vl.vfmklnenan.mvl" => "__builtin_ve_vl_vfmklnenan_mvl", + "llvm.ve.vl.vfmklnenan.mvml" => "__builtin_ve_vl_vfmklnenan_mvml", + "llvm.ve.vl.vfmklnum.mvl" => "__builtin_ve_vl_vfmklnum_mvl", + "llvm.ve.vl.vfmklnum.mvml" => "__builtin_ve_vl_vfmklnum_mvml", + "llvm.ve.vl.vfmkseq.mvl" => "__builtin_ve_vl_vfmkseq_mvl", + "llvm.ve.vl.vfmkseq.mvml" => "__builtin_ve_vl_vfmkseq_mvml", + "llvm.ve.vl.vfmkseqnan.mvl" => "__builtin_ve_vl_vfmkseqnan_mvl", + "llvm.ve.vl.vfmkseqnan.mvml" => "__builtin_ve_vl_vfmkseqnan_mvml", + "llvm.ve.vl.vfmksge.mvl" => "__builtin_ve_vl_vfmksge_mvl", + "llvm.ve.vl.vfmksge.mvml" => "__builtin_ve_vl_vfmksge_mvml", + "llvm.ve.vl.vfmksgenan.mvl" => "__builtin_ve_vl_vfmksgenan_mvl", + "llvm.ve.vl.vfmksgenan.mvml" => "__builtin_ve_vl_vfmksgenan_mvml", + "llvm.ve.vl.vfmksgt.mvl" => "__builtin_ve_vl_vfmksgt_mvl", + "llvm.ve.vl.vfmksgt.mvml" => "__builtin_ve_vl_vfmksgt_mvml", + "llvm.ve.vl.vfmksgtnan.mvl" => "__builtin_ve_vl_vfmksgtnan_mvl", + "llvm.ve.vl.vfmksgtnan.mvml" => "__builtin_ve_vl_vfmksgtnan_mvml", + "llvm.ve.vl.vfmksle.mvl" => "__builtin_ve_vl_vfmksle_mvl", + "llvm.ve.vl.vfmksle.mvml" => "__builtin_ve_vl_vfmksle_mvml", + "llvm.ve.vl.vfmkslenan.mvl" => "__builtin_ve_vl_vfmkslenan_mvl", + "llvm.ve.vl.vfmkslenan.mvml" => "__builtin_ve_vl_vfmkslenan_mvml", + "llvm.ve.vl.vfmkslt.mvl" => "__builtin_ve_vl_vfmkslt_mvl", + "llvm.ve.vl.vfmkslt.mvml" => "__builtin_ve_vl_vfmkslt_mvml", + "llvm.ve.vl.vfmksltnan.mvl" => "__builtin_ve_vl_vfmksltnan_mvl", + "llvm.ve.vl.vfmksltnan.mvml" => "__builtin_ve_vl_vfmksltnan_mvml", + "llvm.ve.vl.vfmksnan.mvl" => "__builtin_ve_vl_vfmksnan_mvl", + "llvm.ve.vl.vfmksnan.mvml" => 
"__builtin_ve_vl_vfmksnan_mvml", + "llvm.ve.vl.vfmksne.mvl" => "__builtin_ve_vl_vfmksne_mvl", + "llvm.ve.vl.vfmksne.mvml" => "__builtin_ve_vl_vfmksne_mvml", + "llvm.ve.vl.vfmksnenan.mvl" => "__builtin_ve_vl_vfmksnenan_mvl", + "llvm.ve.vl.vfmksnenan.mvml" => "__builtin_ve_vl_vfmksnenan_mvml", + "llvm.ve.vl.vfmksnum.mvl" => "__builtin_ve_vl_vfmksnum_mvl", + "llvm.ve.vl.vfmksnum.mvml" => "__builtin_ve_vl_vfmksnum_mvml", + "llvm.ve.vl.vfmkweq.mvl" => "__builtin_ve_vl_vfmkweq_mvl", + "llvm.ve.vl.vfmkweq.mvml" => "__builtin_ve_vl_vfmkweq_mvml", + "llvm.ve.vl.vfmkweqnan.mvl" => "__builtin_ve_vl_vfmkweqnan_mvl", + "llvm.ve.vl.vfmkweqnan.mvml" => "__builtin_ve_vl_vfmkweqnan_mvml", + "llvm.ve.vl.vfmkwge.mvl" => "__builtin_ve_vl_vfmkwge_mvl", + "llvm.ve.vl.vfmkwge.mvml" => "__builtin_ve_vl_vfmkwge_mvml", + "llvm.ve.vl.vfmkwgenan.mvl" => "__builtin_ve_vl_vfmkwgenan_mvl", + "llvm.ve.vl.vfmkwgenan.mvml" => "__builtin_ve_vl_vfmkwgenan_mvml", + "llvm.ve.vl.vfmkwgt.mvl" => "__builtin_ve_vl_vfmkwgt_mvl", + "llvm.ve.vl.vfmkwgt.mvml" => "__builtin_ve_vl_vfmkwgt_mvml", + "llvm.ve.vl.vfmkwgtnan.mvl" => "__builtin_ve_vl_vfmkwgtnan_mvl", + "llvm.ve.vl.vfmkwgtnan.mvml" => "__builtin_ve_vl_vfmkwgtnan_mvml", + "llvm.ve.vl.vfmkwle.mvl" => "__builtin_ve_vl_vfmkwle_mvl", + "llvm.ve.vl.vfmkwle.mvml" => "__builtin_ve_vl_vfmkwle_mvml", + "llvm.ve.vl.vfmkwlenan.mvl" => "__builtin_ve_vl_vfmkwlenan_mvl", + "llvm.ve.vl.vfmkwlenan.mvml" => "__builtin_ve_vl_vfmkwlenan_mvml", + "llvm.ve.vl.vfmkwlt.mvl" => "__builtin_ve_vl_vfmkwlt_mvl", + "llvm.ve.vl.vfmkwlt.mvml" => "__builtin_ve_vl_vfmkwlt_mvml", + "llvm.ve.vl.vfmkwltnan.mvl" => "__builtin_ve_vl_vfmkwltnan_mvl", + "llvm.ve.vl.vfmkwltnan.mvml" => "__builtin_ve_vl_vfmkwltnan_mvml", + "llvm.ve.vl.vfmkwnan.mvl" => "__builtin_ve_vl_vfmkwnan_mvl", + "llvm.ve.vl.vfmkwnan.mvml" => "__builtin_ve_vl_vfmkwnan_mvml", + "llvm.ve.vl.vfmkwne.mvl" => "__builtin_ve_vl_vfmkwne_mvl", + "llvm.ve.vl.vfmkwne.mvml" => "__builtin_ve_vl_vfmkwne_mvml", + "llvm.ve.vl.vfmkwnenan.mvl" => "__builtin_ve_vl_vfmkwnenan_mvl", + "llvm.ve.vl.vfmkwnenan.mvml" => "__builtin_ve_vl_vfmkwnenan_mvml", + "llvm.ve.vl.vfmkwnum.mvl" => "__builtin_ve_vl_vfmkwnum_mvl", + "llvm.ve.vl.vfmkwnum.mvml" => "__builtin_ve_vl_vfmkwnum_mvml", + "llvm.ve.vl.vfmsbd.vsvvl" => "__builtin_ve_vl_vfmsbd_vsvvl", + "llvm.ve.vl.vfmsbd.vsvvmvl" => "__builtin_ve_vl_vfmsbd_vsvvmvl", + "llvm.ve.vl.vfmsbd.vsvvvl" => "__builtin_ve_vl_vfmsbd_vsvvvl", + "llvm.ve.vl.vfmsbd.vvsvl" => "__builtin_ve_vl_vfmsbd_vvsvl", + "llvm.ve.vl.vfmsbd.vvsvmvl" => "__builtin_ve_vl_vfmsbd_vvsvmvl", + "llvm.ve.vl.vfmsbd.vvsvvl" => "__builtin_ve_vl_vfmsbd_vvsvvl", + "llvm.ve.vl.vfmsbd.vvvvl" => "__builtin_ve_vl_vfmsbd_vvvvl", + "llvm.ve.vl.vfmsbd.vvvvmvl" => "__builtin_ve_vl_vfmsbd_vvvvmvl", + "llvm.ve.vl.vfmsbd.vvvvvl" => "__builtin_ve_vl_vfmsbd_vvvvvl", + "llvm.ve.vl.vfmsbs.vsvvl" => "__builtin_ve_vl_vfmsbs_vsvvl", + "llvm.ve.vl.vfmsbs.vsvvmvl" => "__builtin_ve_vl_vfmsbs_vsvvmvl", + "llvm.ve.vl.vfmsbs.vsvvvl" => "__builtin_ve_vl_vfmsbs_vsvvvl", + "llvm.ve.vl.vfmsbs.vvsvl" => "__builtin_ve_vl_vfmsbs_vvsvl", + "llvm.ve.vl.vfmsbs.vvsvmvl" => "__builtin_ve_vl_vfmsbs_vvsvmvl", + "llvm.ve.vl.vfmsbs.vvsvvl" => "__builtin_ve_vl_vfmsbs_vvsvvl", + "llvm.ve.vl.vfmsbs.vvvvl" => "__builtin_ve_vl_vfmsbs_vvvvl", + "llvm.ve.vl.vfmsbs.vvvvmvl" => "__builtin_ve_vl_vfmsbs_vvvvmvl", + "llvm.ve.vl.vfmsbs.vvvvvl" => "__builtin_ve_vl_vfmsbs_vvvvvl", + "llvm.ve.vl.vfmuld.vsvl" => "__builtin_ve_vl_vfmuld_vsvl", + "llvm.ve.vl.vfmuld.vsvmvl" => "__builtin_ve_vl_vfmuld_vsvmvl", + 
"llvm.ve.vl.vfmuld.vsvvl" => "__builtin_ve_vl_vfmuld_vsvvl", + "llvm.ve.vl.vfmuld.vvvl" => "__builtin_ve_vl_vfmuld_vvvl", + "llvm.ve.vl.vfmuld.vvvmvl" => "__builtin_ve_vl_vfmuld_vvvmvl", + "llvm.ve.vl.vfmuld.vvvvl" => "__builtin_ve_vl_vfmuld_vvvvl", + "llvm.ve.vl.vfmuls.vsvl" => "__builtin_ve_vl_vfmuls_vsvl", + "llvm.ve.vl.vfmuls.vsvmvl" => "__builtin_ve_vl_vfmuls_vsvmvl", + "llvm.ve.vl.vfmuls.vsvvl" => "__builtin_ve_vl_vfmuls_vsvvl", + "llvm.ve.vl.vfmuls.vvvl" => "__builtin_ve_vl_vfmuls_vvvl", + "llvm.ve.vl.vfmuls.vvvmvl" => "__builtin_ve_vl_vfmuls_vvvmvl", + "llvm.ve.vl.vfmuls.vvvvl" => "__builtin_ve_vl_vfmuls_vvvvl", + "llvm.ve.vl.vfnmadd.vsvvl" => "__builtin_ve_vl_vfnmadd_vsvvl", + "llvm.ve.vl.vfnmadd.vsvvmvl" => "__builtin_ve_vl_vfnmadd_vsvvmvl", + "llvm.ve.vl.vfnmadd.vsvvvl" => "__builtin_ve_vl_vfnmadd_vsvvvl", + "llvm.ve.vl.vfnmadd.vvsvl" => "__builtin_ve_vl_vfnmadd_vvsvl", + "llvm.ve.vl.vfnmadd.vvsvmvl" => "__builtin_ve_vl_vfnmadd_vvsvmvl", + "llvm.ve.vl.vfnmadd.vvsvvl" => "__builtin_ve_vl_vfnmadd_vvsvvl", + "llvm.ve.vl.vfnmadd.vvvvl" => "__builtin_ve_vl_vfnmadd_vvvvl", + "llvm.ve.vl.vfnmadd.vvvvmvl" => "__builtin_ve_vl_vfnmadd_vvvvmvl", + "llvm.ve.vl.vfnmadd.vvvvvl" => "__builtin_ve_vl_vfnmadd_vvvvvl", + "llvm.ve.vl.vfnmads.vsvvl" => "__builtin_ve_vl_vfnmads_vsvvl", + "llvm.ve.vl.vfnmads.vsvvmvl" => "__builtin_ve_vl_vfnmads_vsvvmvl", + "llvm.ve.vl.vfnmads.vsvvvl" => "__builtin_ve_vl_vfnmads_vsvvvl", + "llvm.ve.vl.vfnmads.vvsvl" => "__builtin_ve_vl_vfnmads_vvsvl", + "llvm.ve.vl.vfnmads.vvsvmvl" => "__builtin_ve_vl_vfnmads_vvsvmvl", + "llvm.ve.vl.vfnmads.vvsvvl" => "__builtin_ve_vl_vfnmads_vvsvvl", + "llvm.ve.vl.vfnmads.vvvvl" => "__builtin_ve_vl_vfnmads_vvvvl", + "llvm.ve.vl.vfnmads.vvvvmvl" => "__builtin_ve_vl_vfnmads_vvvvmvl", + "llvm.ve.vl.vfnmads.vvvvvl" => "__builtin_ve_vl_vfnmads_vvvvvl", + "llvm.ve.vl.vfnmsbd.vsvvl" => "__builtin_ve_vl_vfnmsbd_vsvvl", + "llvm.ve.vl.vfnmsbd.vsvvmvl" => "__builtin_ve_vl_vfnmsbd_vsvvmvl", + "llvm.ve.vl.vfnmsbd.vsvvvl" => "__builtin_ve_vl_vfnmsbd_vsvvvl", + "llvm.ve.vl.vfnmsbd.vvsvl" => "__builtin_ve_vl_vfnmsbd_vvsvl", + "llvm.ve.vl.vfnmsbd.vvsvmvl" => "__builtin_ve_vl_vfnmsbd_vvsvmvl", + "llvm.ve.vl.vfnmsbd.vvsvvl" => "__builtin_ve_vl_vfnmsbd_vvsvvl", + "llvm.ve.vl.vfnmsbd.vvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvl", + "llvm.ve.vl.vfnmsbd.vvvvmvl" => "__builtin_ve_vl_vfnmsbd_vvvvmvl", + "llvm.ve.vl.vfnmsbd.vvvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvvl", + "llvm.ve.vl.vfnmsbs.vsvvl" => "__builtin_ve_vl_vfnmsbs_vsvvl", + "llvm.ve.vl.vfnmsbs.vsvvmvl" => "__builtin_ve_vl_vfnmsbs_vsvvmvl", + "llvm.ve.vl.vfnmsbs.vsvvvl" => "__builtin_ve_vl_vfnmsbs_vsvvvl", + "llvm.ve.vl.vfnmsbs.vvsvl" => "__builtin_ve_vl_vfnmsbs_vvsvl", + "llvm.ve.vl.vfnmsbs.vvsvmvl" => "__builtin_ve_vl_vfnmsbs_vvsvmvl", + "llvm.ve.vl.vfnmsbs.vvsvvl" => "__builtin_ve_vl_vfnmsbs_vvsvvl", + "llvm.ve.vl.vfnmsbs.vvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvl", + "llvm.ve.vl.vfnmsbs.vvvvmvl" => "__builtin_ve_vl_vfnmsbs_vvvvmvl", + "llvm.ve.vl.vfnmsbs.vvvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvvl", + "llvm.ve.vl.vfrmaxdfst.vvl" => "__builtin_ve_vl_vfrmaxdfst_vvl", + "llvm.ve.vl.vfrmaxdfst.vvvl" => "__builtin_ve_vl_vfrmaxdfst_vvvl", + "llvm.ve.vl.vfrmaxdlst.vvl" => "__builtin_ve_vl_vfrmaxdlst_vvl", + "llvm.ve.vl.vfrmaxdlst.vvvl" => "__builtin_ve_vl_vfrmaxdlst_vvvl", + "llvm.ve.vl.vfrmaxsfst.vvl" => "__builtin_ve_vl_vfrmaxsfst_vvl", + "llvm.ve.vl.vfrmaxsfst.vvvl" => "__builtin_ve_vl_vfrmaxsfst_vvvl", + "llvm.ve.vl.vfrmaxslst.vvl" => "__builtin_ve_vl_vfrmaxslst_vvl", + "llvm.ve.vl.vfrmaxslst.vvvl" => 
"__builtin_ve_vl_vfrmaxslst_vvvl", + "llvm.ve.vl.vfrmindfst.vvl" => "__builtin_ve_vl_vfrmindfst_vvl", + "llvm.ve.vl.vfrmindfst.vvvl" => "__builtin_ve_vl_vfrmindfst_vvvl", + "llvm.ve.vl.vfrmindlst.vvl" => "__builtin_ve_vl_vfrmindlst_vvl", + "llvm.ve.vl.vfrmindlst.vvvl" => "__builtin_ve_vl_vfrmindlst_vvvl", + "llvm.ve.vl.vfrminsfst.vvl" => "__builtin_ve_vl_vfrminsfst_vvl", + "llvm.ve.vl.vfrminsfst.vvvl" => "__builtin_ve_vl_vfrminsfst_vvvl", + "llvm.ve.vl.vfrminslst.vvl" => "__builtin_ve_vl_vfrminslst_vvl", + "llvm.ve.vl.vfrminslst.vvvl" => "__builtin_ve_vl_vfrminslst_vvvl", + "llvm.ve.vl.vfsqrtd.vvl" => "__builtin_ve_vl_vfsqrtd_vvl", + "llvm.ve.vl.vfsqrtd.vvvl" => "__builtin_ve_vl_vfsqrtd_vvvl", + "llvm.ve.vl.vfsqrts.vvl" => "__builtin_ve_vl_vfsqrts_vvl", + "llvm.ve.vl.vfsqrts.vvvl" => "__builtin_ve_vl_vfsqrts_vvvl", + "llvm.ve.vl.vfsubd.vsvl" => "__builtin_ve_vl_vfsubd_vsvl", + "llvm.ve.vl.vfsubd.vsvmvl" => "__builtin_ve_vl_vfsubd_vsvmvl", + "llvm.ve.vl.vfsubd.vsvvl" => "__builtin_ve_vl_vfsubd_vsvvl", + "llvm.ve.vl.vfsubd.vvvl" => "__builtin_ve_vl_vfsubd_vvvl", + "llvm.ve.vl.vfsubd.vvvmvl" => "__builtin_ve_vl_vfsubd_vvvmvl", + "llvm.ve.vl.vfsubd.vvvvl" => "__builtin_ve_vl_vfsubd_vvvvl", + "llvm.ve.vl.vfsubs.vsvl" => "__builtin_ve_vl_vfsubs_vsvl", + "llvm.ve.vl.vfsubs.vsvmvl" => "__builtin_ve_vl_vfsubs_vsvmvl", + "llvm.ve.vl.vfsubs.vsvvl" => "__builtin_ve_vl_vfsubs_vsvvl", + "llvm.ve.vl.vfsubs.vvvl" => "__builtin_ve_vl_vfsubs_vvvl", + "llvm.ve.vl.vfsubs.vvvmvl" => "__builtin_ve_vl_vfsubs_vvvmvl", + "llvm.ve.vl.vfsubs.vvvvl" => "__builtin_ve_vl_vfsubs_vvvvl", + "llvm.ve.vl.vfsumd.vvl" => "__builtin_ve_vl_vfsumd_vvl", + "llvm.ve.vl.vfsumd.vvml" => "__builtin_ve_vl_vfsumd_vvml", + "llvm.ve.vl.vfsums.vvl" => "__builtin_ve_vl_vfsums_vvl", + "llvm.ve.vl.vfsums.vvml" => "__builtin_ve_vl_vfsums_vvml", + "llvm.ve.vl.vgt.vvssl" => "__builtin_ve_vl_vgt_vvssl", + "llvm.ve.vl.vgt.vvssml" => "__builtin_ve_vl_vgt_vvssml", + "llvm.ve.vl.vgt.vvssmvl" => "__builtin_ve_vl_vgt_vvssmvl", + "llvm.ve.vl.vgt.vvssvl" => "__builtin_ve_vl_vgt_vvssvl", + "llvm.ve.vl.vgtlsx.vvssl" => "__builtin_ve_vl_vgtlsx_vvssl", + "llvm.ve.vl.vgtlsx.vvssml" => "__builtin_ve_vl_vgtlsx_vvssml", + "llvm.ve.vl.vgtlsx.vvssmvl" => "__builtin_ve_vl_vgtlsx_vvssmvl", + "llvm.ve.vl.vgtlsx.vvssvl" => "__builtin_ve_vl_vgtlsx_vvssvl", + "llvm.ve.vl.vgtlsxnc.vvssl" => "__builtin_ve_vl_vgtlsxnc_vvssl", + "llvm.ve.vl.vgtlsxnc.vvssml" => "__builtin_ve_vl_vgtlsxnc_vvssml", + "llvm.ve.vl.vgtlsxnc.vvssmvl" => "__builtin_ve_vl_vgtlsxnc_vvssmvl", + "llvm.ve.vl.vgtlsxnc.vvssvl" => "__builtin_ve_vl_vgtlsxnc_vvssvl", + "llvm.ve.vl.vgtlzx.vvssl" => "__builtin_ve_vl_vgtlzx_vvssl", + "llvm.ve.vl.vgtlzx.vvssml" => "__builtin_ve_vl_vgtlzx_vvssml", + "llvm.ve.vl.vgtlzx.vvssmvl" => "__builtin_ve_vl_vgtlzx_vvssmvl", + "llvm.ve.vl.vgtlzx.vvssvl" => "__builtin_ve_vl_vgtlzx_vvssvl", + "llvm.ve.vl.vgtlzxnc.vvssl" => "__builtin_ve_vl_vgtlzxnc_vvssl", + "llvm.ve.vl.vgtlzxnc.vvssml" => "__builtin_ve_vl_vgtlzxnc_vvssml", + "llvm.ve.vl.vgtlzxnc.vvssmvl" => "__builtin_ve_vl_vgtlzxnc_vvssmvl", + "llvm.ve.vl.vgtlzxnc.vvssvl" => "__builtin_ve_vl_vgtlzxnc_vvssvl", + "llvm.ve.vl.vgtnc.vvssl" => "__builtin_ve_vl_vgtnc_vvssl", + "llvm.ve.vl.vgtnc.vvssml" => "__builtin_ve_vl_vgtnc_vvssml", + "llvm.ve.vl.vgtnc.vvssmvl" => "__builtin_ve_vl_vgtnc_vvssmvl", + "llvm.ve.vl.vgtnc.vvssvl" => "__builtin_ve_vl_vgtnc_vvssvl", + "llvm.ve.vl.vgtu.vvssl" => "__builtin_ve_vl_vgtu_vvssl", + "llvm.ve.vl.vgtu.vvssml" => "__builtin_ve_vl_vgtu_vvssml", + "llvm.ve.vl.vgtu.vvssmvl" => 
"__builtin_ve_vl_vgtu_vvssmvl", + "llvm.ve.vl.vgtu.vvssvl" => "__builtin_ve_vl_vgtu_vvssvl", + "llvm.ve.vl.vgtunc.vvssl" => "__builtin_ve_vl_vgtunc_vvssl", + "llvm.ve.vl.vgtunc.vvssml" => "__builtin_ve_vl_vgtunc_vvssml", + "llvm.ve.vl.vgtunc.vvssmvl" => "__builtin_ve_vl_vgtunc_vvssmvl", + "llvm.ve.vl.vgtunc.vvssvl" => "__builtin_ve_vl_vgtunc_vvssvl", + "llvm.ve.vl.vld.vssl" => "__builtin_ve_vl_vld_vssl", + "llvm.ve.vl.vld.vssvl" => "__builtin_ve_vl_vld_vssvl", + "llvm.ve.vl.vld2d.vssl" => "__builtin_ve_vl_vld2d_vssl", + "llvm.ve.vl.vld2d.vssvl" => "__builtin_ve_vl_vld2d_vssvl", + "llvm.ve.vl.vld2dnc.vssl" => "__builtin_ve_vl_vld2dnc_vssl", + "llvm.ve.vl.vld2dnc.vssvl" => "__builtin_ve_vl_vld2dnc_vssvl", + "llvm.ve.vl.vldl2dsx.vssl" => "__builtin_ve_vl_vldl2dsx_vssl", + "llvm.ve.vl.vldl2dsx.vssvl" => "__builtin_ve_vl_vldl2dsx_vssvl", + "llvm.ve.vl.vldl2dsxnc.vssl" => "__builtin_ve_vl_vldl2dsxnc_vssl", + "llvm.ve.vl.vldl2dsxnc.vssvl" => "__builtin_ve_vl_vldl2dsxnc_vssvl", + "llvm.ve.vl.vldl2dzx.vssl" => "__builtin_ve_vl_vldl2dzx_vssl", + "llvm.ve.vl.vldl2dzx.vssvl" => "__builtin_ve_vl_vldl2dzx_vssvl", + "llvm.ve.vl.vldl2dzxnc.vssl" => "__builtin_ve_vl_vldl2dzxnc_vssl", + "llvm.ve.vl.vldl2dzxnc.vssvl" => "__builtin_ve_vl_vldl2dzxnc_vssvl", + "llvm.ve.vl.vldlsx.vssl" => "__builtin_ve_vl_vldlsx_vssl", + "llvm.ve.vl.vldlsx.vssvl" => "__builtin_ve_vl_vldlsx_vssvl", + "llvm.ve.vl.vldlsxnc.vssl" => "__builtin_ve_vl_vldlsxnc_vssl", + "llvm.ve.vl.vldlsxnc.vssvl" => "__builtin_ve_vl_vldlsxnc_vssvl", + "llvm.ve.vl.vldlzx.vssl" => "__builtin_ve_vl_vldlzx_vssl", + "llvm.ve.vl.vldlzx.vssvl" => "__builtin_ve_vl_vldlzx_vssvl", + "llvm.ve.vl.vldlzxnc.vssl" => "__builtin_ve_vl_vldlzxnc_vssl", + "llvm.ve.vl.vldlzxnc.vssvl" => "__builtin_ve_vl_vldlzxnc_vssvl", + "llvm.ve.vl.vldnc.vssl" => "__builtin_ve_vl_vldnc_vssl", + "llvm.ve.vl.vldnc.vssvl" => "__builtin_ve_vl_vldnc_vssvl", + "llvm.ve.vl.vldu.vssl" => "__builtin_ve_vl_vldu_vssl", + "llvm.ve.vl.vldu.vssvl" => "__builtin_ve_vl_vldu_vssvl", + "llvm.ve.vl.vldu2d.vssl" => "__builtin_ve_vl_vldu2d_vssl", + "llvm.ve.vl.vldu2d.vssvl" => "__builtin_ve_vl_vldu2d_vssvl", + "llvm.ve.vl.vldu2dnc.vssl" => "__builtin_ve_vl_vldu2dnc_vssl", + "llvm.ve.vl.vldu2dnc.vssvl" => "__builtin_ve_vl_vldu2dnc_vssvl", + "llvm.ve.vl.vldunc.vssl" => "__builtin_ve_vl_vldunc_vssl", + "llvm.ve.vl.vldunc.vssvl" => "__builtin_ve_vl_vldunc_vssvl", + "llvm.ve.vl.vldz.vvl" => "__builtin_ve_vl_vldz_vvl", + "llvm.ve.vl.vldz.vvmvl" => "__builtin_ve_vl_vldz_vvmvl", + "llvm.ve.vl.vldz.vvvl" => "__builtin_ve_vl_vldz_vvvl", + "llvm.ve.vl.vmaxsl.vsvl" => "__builtin_ve_vl_vmaxsl_vsvl", + "llvm.ve.vl.vmaxsl.vsvmvl" => "__builtin_ve_vl_vmaxsl_vsvmvl", + "llvm.ve.vl.vmaxsl.vsvvl" => "__builtin_ve_vl_vmaxsl_vsvvl", + "llvm.ve.vl.vmaxsl.vvvl" => "__builtin_ve_vl_vmaxsl_vvvl", + "llvm.ve.vl.vmaxsl.vvvmvl" => "__builtin_ve_vl_vmaxsl_vvvmvl", + "llvm.ve.vl.vmaxsl.vvvvl" => "__builtin_ve_vl_vmaxsl_vvvvl", + "llvm.ve.vl.vmaxswsx.vsvl" => "__builtin_ve_vl_vmaxswsx_vsvl", + "llvm.ve.vl.vmaxswsx.vsvmvl" => "__builtin_ve_vl_vmaxswsx_vsvmvl", + "llvm.ve.vl.vmaxswsx.vsvvl" => "__builtin_ve_vl_vmaxswsx_vsvvl", + "llvm.ve.vl.vmaxswsx.vvvl" => "__builtin_ve_vl_vmaxswsx_vvvl", + "llvm.ve.vl.vmaxswsx.vvvmvl" => "__builtin_ve_vl_vmaxswsx_vvvmvl", + "llvm.ve.vl.vmaxswsx.vvvvl" => "__builtin_ve_vl_vmaxswsx_vvvvl", + "llvm.ve.vl.vmaxswzx.vsvl" => "__builtin_ve_vl_vmaxswzx_vsvl", + "llvm.ve.vl.vmaxswzx.vsvmvl" => "__builtin_ve_vl_vmaxswzx_vsvmvl", + "llvm.ve.vl.vmaxswzx.vsvvl" => "__builtin_ve_vl_vmaxswzx_vsvvl", + 
"llvm.ve.vl.vmaxswzx.vvvl" => "__builtin_ve_vl_vmaxswzx_vvvl", + "llvm.ve.vl.vmaxswzx.vvvmvl" => "__builtin_ve_vl_vmaxswzx_vvvmvl", + "llvm.ve.vl.vmaxswzx.vvvvl" => "__builtin_ve_vl_vmaxswzx_vvvvl", + "llvm.ve.vl.vminsl.vsvl" => "__builtin_ve_vl_vminsl_vsvl", + "llvm.ve.vl.vminsl.vsvmvl" => "__builtin_ve_vl_vminsl_vsvmvl", + "llvm.ve.vl.vminsl.vsvvl" => "__builtin_ve_vl_vminsl_vsvvl", + "llvm.ve.vl.vminsl.vvvl" => "__builtin_ve_vl_vminsl_vvvl", + "llvm.ve.vl.vminsl.vvvmvl" => "__builtin_ve_vl_vminsl_vvvmvl", + "llvm.ve.vl.vminsl.vvvvl" => "__builtin_ve_vl_vminsl_vvvvl", + "llvm.ve.vl.vminswsx.vsvl" => "__builtin_ve_vl_vminswsx_vsvl", + "llvm.ve.vl.vminswsx.vsvmvl" => "__builtin_ve_vl_vminswsx_vsvmvl", + "llvm.ve.vl.vminswsx.vsvvl" => "__builtin_ve_vl_vminswsx_vsvvl", + "llvm.ve.vl.vminswsx.vvvl" => "__builtin_ve_vl_vminswsx_vvvl", + "llvm.ve.vl.vminswsx.vvvmvl" => "__builtin_ve_vl_vminswsx_vvvmvl", + "llvm.ve.vl.vminswsx.vvvvl" => "__builtin_ve_vl_vminswsx_vvvvl", + "llvm.ve.vl.vminswzx.vsvl" => "__builtin_ve_vl_vminswzx_vsvl", + "llvm.ve.vl.vminswzx.vsvmvl" => "__builtin_ve_vl_vminswzx_vsvmvl", + "llvm.ve.vl.vminswzx.vsvvl" => "__builtin_ve_vl_vminswzx_vsvvl", + "llvm.ve.vl.vminswzx.vvvl" => "__builtin_ve_vl_vminswzx_vvvl", + "llvm.ve.vl.vminswzx.vvvmvl" => "__builtin_ve_vl_vminswzx_vvvmvl", + "llvm.ve.vl.vminswzx.vvvvl" => "__builtin_ve_vl_vminswzx_vvvvl", + "llvm.ve.vl.vmrg.vsvml" => "__builtin_ve_vl_vmrg_vsvml", + "llvm.ve.vl.vmrg.vsvmvl" => "__builtin_ve_vl_vmrg_vsvmvl", + "llvm.ve.vl.vmrg.vvvml" => "__builtin_ve_vl_vmrg_vvvml", + "llvm.ve.vl.vmrg.vvvmvl" => "__builtin_ve_vl_vmrg_vvvmvl", + "llvm.ve.vl.vmrgw.vsvMl" => "__builtin_ve_vl_vmrgw_vsvMl", + "llvm.ve.vl.vmrgw.vsvMvl" => "__builtin_ve_vl_vmrgw_vsvMvl", + "llvm.ve.vl.vmrgw.vvvMl" => "__builtin_ve_vl_vmrgw_vvvMl", + "llvm.ve.vl.vmrgw.vvvMvl" => "__builtin_ve_vl_vmrgw_vvvMvl", + "llvm.ve.vl.vmulsl.vsvl" => "__builtin_ve_vl_vmulsl_vsvl", + "llvm.ve.vl.vmulsl.vsvmvl" => "__builtin_ve_vl_vmulsl_vsvmvl", + "llvm.ve.vl.vmulsl.vsvvl" => "__builtin_ve_vl_vmulsl_vsvvl", + "llvm.ve.vl.vmulsl.vvvl" => "__builtin_ve_vl_vmulsl_vvvl", + "llvm.ve.vl.vmulsl.vvvmvl" => "__builtin_ve_vl_vmulsl_vvvmvl", + "llvm.ve.vl.vmulsl.vvvvl" => "__builtin_ve_vl_vmulsl_vvvvl", + "llvm.ve.vl.vmulslw.vsvl" => "__builtin_ve_vl_vmulslw_vsvl", + "llvm.ve.vl.vmulslw.vsvvl" => "__builtin_ve_vl_vmulslw_vsvvl", + "llvm.ve.vl.vmulslw.vvvl" => "__builtin_ve_vl_vmulslw_vvvl", + "llvm.ve.vl.vmulslw.vvvvl" => "__builtin_ve_vl_vmulslw_vvvvl", + "llvm.ve.vl.vmulswsx.vsvl" => "__builtin_ve_vl_vmulswsx_vsvl", + "llvm.ve.vl.vmulswsx.vsvmvl" => "__builtin_ve_vl_vmulswsx_vsvmvl", + "llvm.ve.vl.vmulswsx.vsvvl" => "__builtin_ve_vl_vmulswsx_vsvvl", + "llvm.ve.vl.vmulswsx.vvvl" => "__builtin_ve_vl_vmulswsx_vvvl", + "llvm.ve.vl.vmulswsx.vvvmvl" => "__builtin_ve_vl_vmulswsx_vvvmvl", + "llvm.ve.vl.vmulswsx.vvvvl" => "__builtin_ve_vl_vmulswsx_vvvvl", + "llvm.ve.vl.vmulswzx.vsvl" => "__builtin_ve_vl_vmulswzx_vsvl", + "llvm.ve.vl.vmulswzx.vsvmvl" => "__builtin_ve_vl_vmulswzx_vsvmvl", + "llvm.ve.vl.vmulswzx.vsvvl" => "__builtin_ve_vl_vmulswzx_vsvvl", + "llvm.ve.vl.vmulswzx.vvvl" => "__builtin_ve_vl_vmulswzx_vvvl", + "llvm.ve.vl.vmulswzx.vvvmvl" => "__builtin_ve_vl_vmulswzx_vvvmvl", + "llvm.ve.vl.vmulswzx.vvvvl" => "__builtin_ve_vl_vmulswzx_vvvvl", + "llvm.ve.vl.vmulul.vsvl" => "__builtin_ve_vl_vmulul_vsvl", + "llvm.ve.vl.vmulul.vsvmvl" => "__builtin_ve_vl_vmulul_vsvmvl", + "llvm.ve.vl.vmulul.vsvvl" => "__builtin_ve_vl_vmulul_vsvvl", + "llvm.ve.vl.vmulul.vvvl" => 
"__builtin_ve_vl_vmulul_vvvl", + "llvm.ve.vl.vmulul.vvvmvl" => "__builtin_ve_vl_vmulul_vvvmvl", + "llvm.ve.vl.vmulul.vvvvl" => "__builtin_ve_vl_vmulul_vvvvl", + "llvm.ve.vl.vmuluw.vsvl" => "__builtin_ve_vl_vmuluw_vsvl", + "llvm.ve.vl.vmuluw.vsvmvl" => "__builtin_ve_vl_vmuluw_vsvmvl", + "llvm.ve.vl.vmuluw.vsvvl" => "__builtin_ve_vl_vmuluw_vsvvl", + "llvm.ve.vl.vmuluw.vvvl" => "__builtin_ve_vl_vmuluw_vvvl", + "llvm.ve.vl.vmuluw.vvvmvl" => "__builtin_ve_vl_vmuluw_vvvmvl", + "llvm.ve.vl.vmuluw.vvvvl" => "__builtin_ve_vl_vmuluw_vvvvl", + "llvm.ve.vl.vmv.vsvl" => "__builtin_ve_vl_vmv_vsvl", + "llvm.ve.vl.vmv.vsvmvl" => "__builtin_ve_vl_vmv_vsvmvl", + "llvm.ve.vl.vmv.vsvvl" => "__builtin_ve_vl_vmv_vsvvl", + "llvm.ve.vl.vor.vsvl" => "__builtin_ve_vl_vor_vsvl", + "llvm.ve.vl.vor.vsvmvl" => "__builtin_ve_vl_vor_vsvmvl", + "llvm.ve.vl.vor.vsvvl" => "__builtin_ve_vl_vor_vsvvl", + "llvm.ve.vl.vor.vvvl" => "__builtin_ve_vl_vor_vvvl", + "llvm.ve.vl.vor.vvvmvl" => "__builtin_ve_vl_vor_vvvmvl", + "llvm.ve.vl.vor.vvvvl" => "__builtin_ve_vl_vor_vvvvl", + "llvm.ve.vl.vpcnt.vvl" => "__builtin_ve_vl_vpcnt_vvl", + "llvm.ve.vl.vpcnt.vvmvl" => "__builtin_ve_vl_vpcnt_vvmvl", + "llvm.ve.vl.vpcnt.vvvl" => "__builtin_ve_vl_vpcnt_vvvl", + "llvm.ve.vl.vrand.vvl" => "__builtin_ve_vl_vrand_vvl", + "llvm.ve.vl.vrand.vvml" => "__builtin_ve_vl_vrand_vvml", + "llvm.ve.vl.vrcpd.vvl" => "__builtin_ve_vl_vrcpd_vvl", + "llvm.ve.vl.vrcpd.vvvl" => "__builtin_ve_vl_vrcpd_vvvl", + "llvm.ve.vl.vrcps.vvl" => "__builtin_ve_vl_vrcps_vvl", + "llvm.ve.vl.vrcps.vvvl" => "__builtin_ve_vl_vrcps_vvvl", + "llvm.ve.vl.vrmaxslfst.vvl" => "__builtin_ve_vl_vrmaxslfst_vvl", + "llvm.ve.vl.vrmaxslfst.vvvl" => "__builtin_ve_vl_vrmaxslfst_vvvl", + "llvm.ve.vl.vrmaxsllst.vvl" => "__builtin_ve_vl_vrmaxsllst_vvl", + "llvm.ve.vl.vrmaxsllst.vvvl" => "__builtin_ve_vl_vrmaxsllst_vvvl", + "llvm.ve.vl.vrmaxswfstsx.vvl" => "__builtin_ve_vl_vrmaxswfstsx_vvl", + "llvm.ve.vl.vrmaxswfstsx.vvvl" => "__builtin_ve_vl_vrmaxswfstsx_vvvl", + "llvm.ve.vl.vrmaxswfstzx.vvl" => "__builtin_ve_vl_vrmaxswfstzx_vvl", + "llvm.ve.vl.vrmaxswfstzx.vvvl" => "__builtin_ve_vl_vrmaxswfstzx_vvvl", + "llvm.ve.vl.vrmaxswlstsx.vvl" => "__builtin_ve_vl_vrmaxswlstsx_vvl", + "llvm.ve.vl.vrmaxswlstsx.vvvl" => "__builtin_ve_vl_vrmaxswlstsx_vvvl", + "llvm.ve.vl.vrmaxswlstzx.vvl" => "__builtin_ve_vl_vrmaxswlstzx_vvl", + "llvm.ve.vl.vrmaxswlstzx.vvvl" => "__builtin_ve_vl_vrmaxswlstzx_vvvl", + "llvm.ve.vl.vrminslfst.vvl" => "__builtin_ve_vl_vrminslfst_vvl", + "llvm.ve.vl.vrminslfst.vvvl" => "__builtin_ve_vl_vrminslfst_vvvl", + "llvm.ve.vl.vrminsllst.vvl" => "__builtin_ve_vl_vrminsllst_vvl", + "llvm.ve.vl.vrminsllst.vvvl" => "__builtin_ve_vl_vrminsllst_vvvl", + "llvm.ve.vl.vrminswfstsx.vvl" => "__builtin_ve_vl_vrminswfstsx_vvl", + "llvm.ve.vl.vrminswfstsx.vvvl" => "__builtin_ve_vl_vrminswfstsx_vvvl", + "llvm.ve.vl.vrminswfstzx.vvl" => "__builtin_ve_vl_vrminswfstzx_vvl", + "llvm.ve.vl.vrminswfstzx.vvvl" => "__builtin_ve_vl_vrminswfstzx_vvvl", + "llvm.ve.vl.vrminswlstsx.vvl" => "__builtin_ve_vl_vrminswlstsx_vvl", + "llvm.ve.vl.vrminswlstsx.vvvl" => "__builtin_ve_vl_vrminswlstsx_vvvl", + "llvm.ve.vl.vrminswlstzx.vvl" => "__builtin_ve_vl_vrminswlstzx_vvl", + "llvm.ve.vl.vrminswlstzx.vvvl" => "__builtin_ve_vl_vrminswlstzx_vvvl", + "llvm.ve.vl.vror.vvl" => "__builtin_ve_vl_vror_vvl", + "llvm.ve.vl.vror.vvml" => "__builtin_ve_vl_vror_vvml", + "llvm.ve.vl.vrsqrtd.vvl" => "__builtin_ve_vl_vrsqrtd_vvl", + "llvm.ve.vl.vrsqrtd.vvvl" => "__builtin_ve_vl_vrsqrtd_vvvl", + "llvm.ve.vl.vrsqrtdnex.vvl" => 
"__builtin_ve_vl_vrsqrtdnex_vvl", + "llvm.ve.vl.vrsqrtdnex.vvvl" => "__builtin_ve_vl_vrsqrtdnex_vvvl", + "llvm.ve.vl.vrsqrts.vvl" => "__builtin_ve_vl_vrsqrts_vvl", + "llvm.ve.vl.vrsqrts.vvvl" => "__builtin_ve_vl_vrsqrts_vvvl", + "llvm.ve.vl.vrsqrtsnex.vvl" => "__builtin_ve_vl_vrsqrtsnex_vvl", + "llvm.ve.vl.vrsqrtsnex.vvvl" => "__builtin_ve_vl_vrsqrtsnex_vvvl", + "llvm.ve.vl.vrxor.vvl" => "__builtin_ve_vl_vrxor_vvl", + "llvm.ve.vl.vrxor.vvml" => "__builtin_ve_vl_vrxor_vvml", + "llvm.ve.vl.vsc.vvssl" => "__builtin_ve_vl_vsc_vvssl", + "llvm.ve.vl.vsc.vvssml" => "__builtin_ve_vl_vsc_vvssml", + "llvm.ve.vl.vscl.vvssl" => "__builtin_ve_vl_vscl_vvssl", + "llvm.ve.vl.vscl.vvssml" => "__builtin_ve_vl_vscl_vvssml", + "llvm.ve.vl.vsclnc.vvssl" => "__builtin_ve_vl_vsclnc_vvssl", + "llvm.ve.vl.vsclnc.vvssml" => "__builtin_ve_vl_vsclnc_vvssml", + "llvm.ve.vl.vsclncot.vvssl" => "__builtin_ve_vl_vsclncot_vvssl", + "llvm.ve.vl.vsclncot.vvssml" => "__builtin_ve_vl_vsclncot_vvssml", + "llvm.ve.vl.vsclot.vvssl" => "__builtin_ve_vl_vsclot_vvssl", + "llvm.ve.vl.vsclot.vvssml" => "__builtin_ve_vl_vsclot_vvssml", + "llvm.ve.vl.vscnc.vvssl" => "__builtin_ve_vl_vscnc_vvssl", + "llvm.ve.vl.vscnc.vvssml" => "__builtin_ve_vl_vscnc_vvssml", + "llvm.ve.vl.vscncot.vvssl" => "__builtin_ve_vl_vscncot_vvssl", + "llvm.ve.vl.vscncot.vvssml" => "__builtin_ve_vl_vscncot_vvssml", + "llvm.ve.vl.vscot.vvssl" => "__builtin_ve_vl_vscot_vvssl", + "llvm.ve.vl.vscot.vvssml" => "__builtin_ve_vl_vscot_vvssml", + "llvm.ve.vl.vscu.vvssl" => "__builtin_ve_vl_vscu_vvssl", + "llvm.ve.vl.vscu.vvssml" => "__builtin_ve_vl_vscu_vvssml", + "llvm.ve.vl.vscunc.vvssl" => "__builtin_ve_vl_vscunc_vvssl", + "llvm.ve.vl.vscunc.vvssml" => "__builtin_ve_vl_vscunc_vvssml", + "llvm.ve.vl.vscuncot.vvssl" => "__builtin_ve_vl_vscuncot_vvssl", + "llvm.ve.vl.vscuncot.vvssml" => "__builtin_ve_vl_vscuncot_vvssml", + "llvm.ve.vl.vscuot.vvssl" => "__builtin_ve_vl_vscuot_vvssl", + "llvm.ve.vl.vscuot.vvssml" => "__builtin_ve_vl_vscuot_vvssml", + "llvm.ve.vl.vseq.vl" => "__builtin_ve_vl_vseq_vl", + "llvm.ve.vl.vseq.vvl" => "__builtin_ve_vl_vseq_vvl", + "llvm.ve.vl.vsfa.vvssl" => "__builtin_ve_vl_vsfa_vvssl", + "llvm.ve.vl.vsfa.vvssmvl" => "__builtin_ve_vl_vsfa_vvssmvl", + "llvm.ve.vl.vsfa.vvssvl" => "__builtin_ve_vl_vsfa_vvssvl", + "llvm.ve.vl.vshf.vvvsl" => "__builtin_ve_vl_vshf_vvvsl", + "llvm.ve.vl.vshf.vvvsvl" => "__builtin_ve_vl_vshf_vvvsvl", + "llvm.ve.vl.vslal.vvsl" => "__builtin_ve_vl_vslal_vvsl", + "llvm.ve.vl.vslal.vvsmvl" => "__builtin_ve_vl_vslal_vvsmvl", + "llvm.ve.vl.vslal.vvsvl" => "__builtin_ve_vl_vslal_vvsvl", + "llvm.ve.vl.vslal.vvvl" => "__builtin_ve_vl_vslal_vvvl", + "llvm.ve.vl.vslal.vvvmvl" => "__builtin_ve_vl_vslal_vvvmvl", + "llvm.ve.vl.vslal.vvvvl" => "__builtin_ve_vl_vslal_vvvvl", + "llvm.ve.vl.vslawsx.vvsl" => "__builtin_ve_vl_vslawsx_vvsl", + "llvm.ve.vl.vslawsx.vvsmvl" => "__builtin_ve_vl_vslawsx_vvsmvl", + "llvm.ve.vl.vslawsx.vvsvl" => "__builtin_ve_vl_vslawsx_vvsvl", + "llvm.ve.vl.vslawsx.vvvl" => "__builtin_ve_vl_vslawsx_vvvl", + "llvm.ve.vl.vslawsx.vvvmvl" => "__builtin_ve_vl_vslawsx_vvvmvl", + "llvm.ve.vl.vslawsx.vvvvl" => "__builtin_ve_vl_vslawsx_vvvvl", + "llvm.ve.vl.vslawzx.vvsl" => "__builtin_ve_vl_vslawzx_vvsl", + "llvm.ve.vl.vslawzx.vvsmvl" => "__builtin_ve_vl_vslawzx_vvsmvl", + "llvm.ve.vl.vslawzx.vvsvl" => "__builtin_ve_vl_vslawzx_vvsvl", + "llvm.ve.vl.vslawzx.vvvl" => "__builtin_ve_vl_vslawzx_vvvl", + "llvm.ve.vl.vslawzx.vvvmvl" => "__builtin_ve_vl_vslawzx_vvvmvl", + "llvm.ve.vl.vslawzx.vvvvl" => 
"__builtin_ve_vl_vslawzx_vvvvl", + "llvm.ve.vl.vsll.vvsl" => "__builtin_ve_vl_vsll_vvsl", + "llvm.ve.vl.vsll.vvsmvl" => "__builtin_ve_vl_vsll_vvsmvl", + "llvm.ve.vl.vsll.vvsvl" => "__builtin_ve_vl_vsll_vvsvl", + "llvm.ve.vl.vsll.vvvl" => "__builtin_ve_vl_vsll_vvvl", + "llvm.ve.vl.vsll.vvvmvl" => "__builtin_ve_vl_vsll_vvvmvl", + "llvm.ve.vl.vsll.vvvvl" => "__builtin_ve_vl_vsll_vvvvl", + "llvm.ve.vl.vsral.vvsl" => "__builtin_ve_vl_vsral_vvsl", + "llvm.ve.vl.vsral.vvsmvl" => "__builtin_ve_vl_vsral_vvsmvl", + "llvm.ve.vl.vsral.vvsvl" => "__builtin_ve_vl_vsral_vvsvl", + "llvm.ve.vl.vsral.vvvl" => "__builtin_ve_vl_vsral_vvvl", + "llvm.ve.vl.vsral.vvvmvl" => "__builtin_ve_vl_vsral_vvvmvl", + "llvm.ve.vl.vsral.vvvvl" => "__builtin_ve_vl_vsral_vvvvl", + "llvm.ve.vl.vsrawsx.vvsl" => "__builtin_ve_vl_vsrawsx_vvsl", + "llvm.ve.vl.vsrawsx.vvsmvl" => "__builtin_ve_vl_vsrawsx_vvsmvl", + "llvm.ve.vl.vsrawsx.vvsvl" => "__builtin_ve_vl_vsrawsx_vvsvl", + "llvm.ve.vl.vsrawsx.vvvl" => "__builtin_ve_vl_vsrawsx_vvvl", + "llvm.ve.vl.vsrawsx.vvvmvl" => "__builtin_ve_vl_vsrawsx_vvvmvl", + "llvm.ve.vl.vsrawsx.vvvvl" => "__builtin_ve_vl_vsrawsx_vvvvl", + "llvm.ve.vl.vsrawzx.vvsl" => "__builtin_ve_vl_vsrawzx_vvsl", + "llvm.ve.vl.vsrawzx.vvsmvl" => "__builtin_ve_vl_vsrawzx_vvsmvl", + "llvm.ve.vl.vsrawzx.vvsvl" => "__builtin_ve_vl_vsrawzx_vvsvl", + "llvm.ve.vl.vsrawzx.vvvl" => "__builtin_ve_vl_vsrawzx_vvvl", + "llvm.ve.vl.vsrawzx.vvvmvl" => "__builtin_ve_vl_vsrawzx_vvvmvl", + "llvm.ve.vl.vsrawzx.vvvvl" => "__builtin_ve_vl_vsrawzx_vvvvl", + "llvm.ve.vl.vsrl.vvsl" => "__builtin_ve_vl_vsrl_vvsl", + "llvm.ve.vl.vsrl.vvsmvl" => "__builtin_ve_vl_vsrl_vvsmvl", + "llvm.ve.vl.vsrl.vvsvl" => "__builtin_ve_vl_vsrl_vvsvl", + "llvm.ve.vl.vsrl.vvvl" => "__builtin_ve_vl_vsrl_vvvl", + "llvm.ve.vl.vsrl.vvvmvl" => "__builtin_ve_vl_vsrl_vvvmvl", + "llvm.ve.vl.vsrl.vvvvl" => "__builtin_ve_vl_vsrl_vvvvl", + "llvm.ve.vl.vst.vssl" => "__builtin_ve_vl_vst_vssl", + "llvm.ve.vl.vst.vssml" => "__builtin_ve_vl_vst_vssml", + "llvm.ve.vl.vst2d.vssl" => "__builtin_ve_vl_vst2d_vssl", + "llvm.ve.vl.vst2d.vssml" => "__builtin_ve_vl_vst2d_vssml", + "llvm.ve.vl.vst2dnc.vssl" => "__builtin_ve_vl_vst2dnc_vssl", + "llvm.ve.vl.vst2dnc.vssml" => "__builtin_ve_vl_vst2dnc_vssml", + "llvm.ve.vl.vst2dncot.vssl" => "__builtin_ve_vl_vst2dncot_vssl", + "llvm.ve.vl.vst2dncot.vssml" => "__builtin_ve_vl_vst2dncot_vssml", + "llvm.ve.vl.vst2dot.vssl" => "__builtin_ve_vl_vst2dot_vssl", + "llvm.ve.vl.vst2dot.vssml" => "__builtin_ve_vl_vst2dot_vssml", + "llvm.ve.vl.vstl.vssl" => "__builtin_ve_vl_vstl_vssl", + "llvm.ve.vl.vstl.vssml" => "__builtin_ve_vl_vstl_vssml", + "llvm.ve.vl.vstl2d.vssl" => "__builtin_ve_vl_vstl2d_vssl", + "llvm.ve.vl.vstl2d.vssml" => "__builtin_ve_vl_vstl2d_vssml", + "llvm.ve.vl.vstl2dnc.vssl" => "__builtin_ve_vl_vstl2dnc_vssl", + "llvm.ve.vl.vstl2dnc.vssml" => "__builtin_ve_vl_vstl2dnc_vssml", + "llvm.ve.vl.vstl2dncot.vssl" => "__builtin_ve_vl_vstl2dncot_vssl", + "llvm.ve.vl.vstl2dncot.vssml" => "__builtin_ve_vl_vstl2dncot_vssml", + "llvm.ve.vl.vstl2dot.vssl" => "__builtin_ve_vl_vstl2dot_vssl", + "llvm.ve.vl.vstl2dot.vssml" => "__builtin_ve_vl_vstl2dot_vssml", + "llvm.ve.vl.vstlnc.vssl" => "__builtin_ve_vl_vstlnc_vssl", + "llvm.ve.vl.vstlnc.vssml" => "__builtin_ve_vl_vstlnc_vssml", + "llvm.ve.vl.vstlncot.vssl" => "__builtin_ve_vl_vstlncot_vssl", + "llvm.ve.vl.vstlncot.vssml" => "__builtin_ve_vl_vstlncot_vssml", + "llvm.ve.vl.vstlot.vssl" => "__builtin_ve_vl_vstlot_vssl", + "llvm.ve.vl.vstlot.vssml" => "__builtin_ve_vl_vstlot_vssml", + 
"llvm.ve.vl.vstnc.vssl" => "__builtin_ve_vl_vstnc_vssl", + "llvm.ve.vl.vstnc.vssml" => "__builtin_ve_vl_vstnc_vssml", + "llvm.ve.vl.vstncot.vssl" => "__builtin_ve_vl_vstncot_vssl", + "llvm.ve.vl.vstncot.vssml" => "__builtin_ve_vl_vstncot_vssml", + "llvm.ve.vl.vstot.vssl" => "__builtin_ve_vl_vstot_vssl", + "llvm.ve.vl.vstot.vssml" => "__builtin_ve_vl_vstot_vssml", + "llvm.ve.vl.vstu.vssl" => "__builtin_ve_vl_vstu_vssl", + "llvm.ve.vl.vstu.vssml" => "__builtin_ve_vl_vstu_vssml", + "llvm.ve.vl.vstu2d.vssl" => "__builtin_ve_vl_vstu2d_vssl", + "llvm.ve.vl.vstu2d.vssml" => "__builtin_ve_vl_vstu2d_vssml", + "llvm.ve.vl.vstu2dnc.vssl" => "__builtin_ve_vl_vstu2dnc_vssl", + "llvm.ve.vl.vstu2dnc.vssml" => "__builtin_ve_vl_vstu2dnc_vssml", + "llvm.ve.vl.vstu2dncot.vssl" => "__builtin_ve_vl_vstu2dncot_vssl", + "llvm.ve.vl.vstu2dncot.vssml" => "__builtin_ve_vl_vstu2dncot_vssml", + "llvm.ve.vl.vstu2dot.vssl" => "__builtin_ve_vl_vstu2dot_vssl", + "llvm.ve.vl.vstu2dot.vssml" => "__builtin_ve_vl_vstu2dot_vssml", + "llvm.ve.vl.vstunc.vssl" => "__builtin_ve_vl_vstunc_vssl", + "llvm.ve.vl.vstunc.vssml" => "__builtin_ve_vl_vstunc_vssml", + "llvm.ve.vl.vstuncot.vssl" => "__builtin_ve_vl_vstuncot_vssl", + "llvm.ve.vl.vstuncot.vssml" => "__builtin_ve_vl_vstuncot_vssml", + "llvm.ve.vl.vstuot.vssl" => "__builtin_ve_vl_vstuot_vssl", + "llvm.ve.vl.vstuot.vssml" => "__builtin_ve_vl_vstuot_vssml", + "llvm.ve.vl.vsubsl.vsvl" => "__builtin_ve_vl_vsubsl_vsvl", + "llvm.ve.vl.vsubsl.vsvmvl" => "__builtin_ve_vl_vsubsl_vsvmvl", + "llvm.ve.vl.vsubsl.vsvvl" => "__builtin_ve_vl_vsubsl_vsvvl", + "llvm.ve.vl.vsubsl.vvvl" => "__builtin_ve_vl_vsubsl_vvvl", + "llvm.ve.vl.vsubsl.vvvmvl" => "__builtin_ve_vl_vsubsl_vvvmvl", + "llvm.ve.vl.vsubsl.vvvvl" => "__builtin_ve_vl_vsubsl_vvvvl", + "llvm.ve.vl.vsubswsx.vsvl" => "__builtin_ve_vl_vsubswsx_vsvl", + "llvm.ve.vl.vsubswsx.vsvmvl" => "__builtin_ve_vl_vsubswsx_vsvmvl", + "llvm.ve.vl.vsubswsx.vsvvl" => "__builtin_ve_vl_vsubswsx_vsvvl", + "llvm.ve.vl.vsubswsx.vvvl" => "__builtin_ve_vl_vsubswsx_vvvl", + "llvm.ve.vl.vsubswsx.vvvmvl" => "__builtin_ve_vl_vsubswsx_vvvmvl", + "llvm.ve.vl.vsubswsx.vvvvl" => "__builtin_ve_vl_vsubswsx_vvvvl", + "llvm.ve.vl.vsubswzx.vsvl" => "__builtin_ve_vl_vsubswzx_vsvl", + "llvm.ve.vl.vsubswzx.vsvmvl" => "__builtin_ve_vl_vsubswzx_vsvmvl", + "llvm.ve.vl.vsubswzx.vsvvl" => "__builtin_ve_vl_vsubswzx_vsvvl", + "llvm.ve.vl.vsubswzx.vvvl" => "__builtin_ve_vl_vsubswzx_vvvl", + "llvm.ve.vl.vsubswzx.vvvmvl" => "__builtin_ve_vl_vsubswzx_vvvmvl", + "llvm.ve.vl.vsubswzx.vvvvl" => "__builtin_ve_vl_vsubswzx_vvvvl", + "llvm.ve.vl.vsubul.vsvl" => "__builtin_ve_vl_vsubul_vsvl", + "llvm.ve.vl.vsubul.vsvmvl" => "__builtin_ve_vl_vsubul_vsvmvl", + "llvm.ve.vl.vsubul.vsvvl" => "__builtin_ve_vl_vsubul_vsvvl", + "llvm.ve.vl.vsubul.vvvl" => "__builtin_ve_vl_vsubul_vvvl", + "llvm.ve.vl.vsubul.vvvmvl" => "__builtin_ve_vl_vsubul_vvvmvl", + "llvm.ve.vl.vsubul.vvvvl" => "__builtin_ve_vl_vsubul_vvvvl", + "llvm.ve.vl.vsubuw.vsvl" => "__builtin_ve_vl_vsubuw_vsvl", + "llvm.ve.vl.vsubuw.vsvmvl" => "__builtin_ve_vl_vsubuw_vsvmvl", + "llvm.ve.vl.vsubuw.vsvvl" => "__builtin_ve_vl_vsubuw_vsvvl", + "llvm.ve.vl.vsubuw.vvvl" => "__builtin_ve_vl_vsubuw_vvvl", + "llvm.ve.vl.vsubuw.vvvmvl" => "__builtin_ve_vl_vsubuw_vvvmvl", + "llvm.ve.vl.vsubuw.vvvvl" => "__builtin_ve_vl_vsubuw_vvvvl", + "llvm.ve.vl.vsuml.vvl" => "__builtin_ve_vl_vsuml_vvl", + "llvm.ve.vl.vsuml.vvml" => "__builtin_ve_vl_vsuml_vvml", + "llvm.ve.vl.vsumwsx.vvl" => "__builtin_ve_vl_vsumwsx_vvl", + "llvm.ve.vl.vsumwsx.vvml" => 
"__builtin_ve_vl_vsumwsx_vvml", + "llvm.ve.vl.vsumwzx.vvl" => "__builtin_ve_vl_vsumwzx_vvl", + "llvm.ve.vl.vsumwzx.vvml" => "__builtin_ve_vl_vsumwzx_vvml", + "llvm.ve.vl.vxor.vsvl" => "__builtin_ve_vl_vxor_vsvl", + "llvm.ve.vl.vxor.vsvmvl" => "__builtin_ve_vl_vxor_vsvmvl", + "llvm.ve.vl.vxor.vsvvl" => "__builtin_ve_vl_vxor_vsvvl", + "llvm.ve.vl.vxor.vvvl" => "__builtin_ve_vl_vxor_vvvl", + "llvm.ve.vl.vxor.vvvmvl" => "__builtin_ve_vl_vxor_vvvmvl", + "llvm.ve.vl.vxor.vvvvl" => "__builtin_ve_vl_vxor_vvvvl", + "llvm.ve.vl.xorm.MMM" => "__builtin_ve_vl_xorm_MMM", + "llvm.ve.vl.xorm.mmm" => "__builtin_ve_vl_xorm_mmm", + // x86 + "llvm.x86.3dnow.pavgusb" => "__builtin_ia32_pavgusb", + "llvm.x86.3dnow.pf2id" => "__builtin_ia32_pf2id", + "llvm.x86.3dnow.pfacc" => "__builtin_ia32_pfacc", + "llvm.x86.3dnow.pfadd" => "__builtin_ia32_pfadd", + "llvm.x86.3dnow.pfcmpeq" => "__builtin_ia32_pfcmpeq", + "llvm.x86.3dnow.pfcmpge" => "__builtin_ia32_pfcmpge", + "llvm.x86.3dnow.pfcmpgt" => "__builtin_ia32_pfcmpgt", + "llvm.x86.3dnow.pfmax" => "__builtin_ia32_pfmax", + "llvm.x86.3dnow.pfmin" => "__builtin_ia32_pfmin", + "llvm.x86.3dnow.pfmul" => "__builtin_ia32_pfmul", + "llvm.x86.3dnow.pfrcp" => "__builtin_ia32_pfrcp", + "llvm.x86.3dnow.pfrcpit1" => "__builtin_ia32_pfrcpit1", + "llvm.x86.3dnow.pfrcpit2" => "__builtin_ia32_pfrcpit2", + "llvm.x86.3dnow.pfrsqit1" => "__builtin_ia32_pfrsqit1", + "llvm.x86.3dnow.pfrsqrt" => "__builtin_ia32_pfrsqrt", + "llvm.x86.3dnow.pfsub" => "__builtin_ia32_pfsub", + "llvm.x86.3dnow.pfsubr" => "__builtin_ia32_pfsubr", + "llvm.x86.3dnow.pi2fd" => "__builtin_ia32_pi2fd", + "llvm.x86.3dnow.pmulhrw" => "__builtin_ia32_pmulhrw", + "llvm.x86.3dnowa.pf2iw" => "__builtin_ia32_pf2iw", + "llvm.x86.3dnowa.pfnacc" => "__builtin_ia32_pfnacc", + "llvm.x86.3dnowa.pfpnacc" => "__builtin_ia32_pfpnacc", + "llvm.x86.3dnowa.pi2fw" => "__builtin_ia32_pi2fw", + "llvm.x86.aadd32" => "__builtin_ia32_aadd32", + "llvm.x86.aadd64" => "__builtin_ia32_aadd64", + "llvm.x86.aand32" => "__builtin_ia32_aand32", + "llvm.x86.aand64" => "__builtin_ia32_aand64", + "llvm.x86.addcarry.u32" => "__builtin_ia32_addcarry_u32", + "llvm.x86.addcarry.u64" => "__builtin_ia32_addcarry_u64", + "llvm.x86.addcarryx.u32" => "__builtin_ia32_addcarryx_u32", + "llvm.x86.addcarryx.u64" => "__builtin_ia32_addcarryx_u64", + "llvm.x86.aesni.aesdec" => "__builtin_ia32_aesdec128", + "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_aesdec256", + "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_aesdec512", + "llvm.x86.aesni.aesdeclast" => "__builtin_ia32_aesdeclast128", + "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_aesdeclast256", + "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_aesdeclast512", + "llvm.x86.aesni.aesenc" => "__builtin_ia32_aesenc128", + "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_aesenc256", + "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_aesenc512", + "llvm.x86.aesni.aesenclast" => "__builtin_ia32_aesenclast128", + "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_aesenclast256", + "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_aesenclast512", + "llvm.x86.aesni.aesimc" => "__builtin_ia32_aesimc128", + "llvm.x86.aesni.aeskeygenassist" => "__builtin_ia32_aeskeygenassist128", + "llvm.x86.aor32" => "__builtin_ia32_aor32", + "llvm.x86.aor64" => "__builtin_ia32_aor64", + "llvm.x86.avx.addsub.pd.256" => "__builtin_ia32_addsubpd256", + "llvm.x86.avx.addsub.ps.256" => "__builtin_ia32_addsubps256", + "llvm.x86.avx.blend.pd.256" => "__builtin_ia32_blendpd256", + "llvm.x86.avx.blend.ps.256" => "__builtin_ia32_blendps256", 
+ "llvm.x86.avx.blendv.pd.256" => "__builtin_ia32_blendvpd256", + "llvm.x86.avx.blendv.ps.256" => "__builtin_ia32_blendvps256", + "llvm.x86.avx.cmp.pd.256" => "__builtin_ia32_cmppd256", + "llvm.x86.avx.cmp.ps.256" => "__builtin_ia32_cmpps256", + "llvm.x86.avx.cvt.pd2.ps.256" => "__builtin_ia32_cvtpd2ps256", + "llvm.x86.avx.cvt.pd2dq.256" => "__builtin_ia32_cvtpd2dq256", + "llvm.x86.avx.cvt.ps2.pd.256" => "__builtin_ia32_cvtps2pd256", + "llvm.x86.avx.cvt.ps2dq.256" => "__builtin_ia32_cvtps2dq256", + "llvm.x86.avx.cvtdq2.pd.256" => "__builtin_ia32_cvtdq2pd256", + "llvm.x86.avx.cvtdq2.ps.256" => "__builtin_ia32_cvtdq2ps256", + "llvm.x86.avx.cvtt.pd2dq.256" => "__builtin_ia32_cvttpd2dq256", + "llvm.x86.avx.cvtt.ps2dq.256" => "__builtin_ia32_cvttps2dq256", + "llvm.x86.avx.dp.ps.256" => "__builtin_ia32_dpps256", + "llvm.x86.avx.hadd.pd.256" => "__builtin_ia32_haddpd256", + "llvm.x86.avx.hadd.ps.256" => "__builtin_ia32_haddps256", + "llvm.x86.avx.hsub.pd.256" => "__builtin_ia32_hsubpd256", + "llvm.x86.avx.hsub.ps.256" => "__builtin_ia32_hsubps256", + "llvm.x86.avx.ldu.dq.256" => "__builtin_ia32_lddqu256", + "llvm.x86.avx.maskload.pd" => "__builtin_ia32_maskloadpd", + "llvm.x86.avx.maskload.pd.256" => "__builtin_ia32_maskloadpd256", + "llvm.x86.avx.maskload.ps" => "__builtin_ia32_maskloadps", + "llvm.x86.avx.maskload.ps.256" => "__builtin_ia32_maskloadps256", + "llvm.x86.avx.maskstore.pd" => "__builtin_ia32_maskstorepd", + "llvm.x86.avx.maskstore.pd.256" => "__builtin_ia32_maskstorepd256", + "llvm.x86.avx.maskstore.ps" => "__builtin_ia32_maskstoreps", + "llvm.x86.avx.maskstore.ps.256" => "__builtin_ia32_maskstoreps256", + "llvm.x86.avx.max.pd.256" => "__builtin_ia32_maxpd256", + "llvm.x86.avx.max.ps.256" => "__builtin_ia32_maxps256", + "llvm.x86.avx.min.pd.256" => "__builtin_ia32_minpd256", + "llvm.x86.avx.min.ps.256" => "__builtin_ia32_minps256", + "llvm.x86.avx.movmsk.pd.256" => "__builtin_ia32_movmskpd256", + "llvm.x86.avx.movmsk.ps.256" => "__builtin_ia32_movmskps256", + "llvm.x86.avx.ptestc.256" => "__builtin_ia32_ptestc256", + "llvm.x86.avx.ptestnzc.256" => "__builtin_ia32_ptestnzc256", + "llvm.x86.avx.ptestz.256" => "__builtin_ia32_ptestz256", + "llvm.x86.avx.rcp.ps.256" => "__builtin_ia32_rcpps256", + "llvm.x86.avx.round.pd.256" => "__builtin_ia32_roundpd256", + "llvm.x86.avx.round.ps.256" => "__builtin_ia32_roundps256", + "llvm.x86.avx.rsqrt.ps.256" => "__builtin_ia32_rsqrtps256", + "llvm.x86.avx.sqrt.pd.256" => "__builtin_ia32_sqrtpd256", + "llvm.x86.avx.sqrt.ps.256" => "__builtin_ia32_sqrtps256", + "llvm.x86.avx.storeu.dq.256" => "__builtin_ia32_storedqu256", + "llvm.x86.avx.storeu.pd.256" => "__builtin_ia32_storeupd256", + "llvm.x86.avx.storeu.ps.256" => "__builtin_ia32_storeups256", + "llvm.x86.avx.vbroadcastf128.pd.256" => "__builtin_ia32_vbroadcastf128_pd256", + "llvm.x86.avx.vbroadcastf128.ps.256" => "__builtin_ia32_vbroadcastf128_ps256", + "llvm.x86.avx.vextractf128.pd.256" => "__builtin_ia32_vextractf128_pd256", + "llvm.x86.avx.vextractf128.ps.256" => "__builtin_ia32_vextractf128_ps256", + "llvm.x86.avx.vextractf128.si.256" => "__builtin_ia32_vextractf128_si256", + "llvm.x86.avx.vinsertf128.pd.256" => "__builtin_ia32_vinsertf128_pd256", + "llvm.x86.avx.vinsertf128.ps.256" => "__builtin_ia32_vinsertf128_ps256", + "llvm.x86.avx.vinsertf128.si.256" => "__builtin_ia32_vinsertf128_si256", + "llvm.x86.avx.vperm2f128.pd.256" => "__builtin_ia32_vperm2f128_pd256", + "llvm.x86.avx.vperm2f128.ps.256" => "__builtin_ia32_vperm2f128_ps256", + "llvm.x86.avx.vperm2f128.si.256" => 
"__builtin_ia32_vperm2f128_si256", + "llvm.x86.avx.vpermilvar.pd" => "__builtin_ia32_vpermilvarpd", + "llvm.x86.avx.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256", + "llvm.x86.avx.vpermilvar.ps" => "__builtin_ia32_vpermilvarps", + "llvm.x86.avx.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256", + "llvm.x86.avx.vtestc.pd" => "__builtin_ia32_vtestcpd", + "llvm.x86.avx.vtestc.pd.256" => "__builtin_ia32_vtestcpd256", + "llvm.x86.avx.vtestc.ps" => "__builtin_ia32_vtestcps", + "llvm.x86.avx.vtestc.ps.256" => "__builtin_ia32_vtestcps256", + "llvm.x86.avx.vtestnzc.pd" => "__builtin_ia32_vtestnzcpd", + "llvm.x86.avx.vtestnzc.pd.256" => "__builtin_ia32_vtestnzcpd256", + "llvm.x86.avx.vtestnzc.ps" => "__builtin_ia32_vtestnzcps", + "llvm.x86.avx.vtestnzc.ps.256" => "__builtin_ia32_vtestnzcps256", + "llvm.x86.avx.vtestz.pd" => "__builtin_ia32_vtestzpd", + "llvm.x86.avx.vtestz.pd.256" => "__builtin_ia32_vtestzpd256", + "llvm.x86.avx.vtestz.ps" => "__builtin_ia32_vtestzps", + "llvm.x86.avx.vtestz.ps.256" => "__builtin_ia32_vtestzps256", + "llvm.x86.avx.vzeroall" => "__builtin_ia32_vzeroall", + "llvm.x86.avx.vzeroupper" => "__builtin_ia32_vzeroupper", + "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gatherd_d", + "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gatherd_d256", + "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gatherd_pd", + "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gatherd_pd256", + "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gatherd_ps", + "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gatherd_ps256", + "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gatherd_q", + "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gatherd_q256", + "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherq_d", + "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherq_d256", + "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherq_pd", + "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherq_pd256", + "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherq_ps", + "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherq_ps256", + "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherq_q", + "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherq_q256", + "llvm.x86.avx2.maskload.d" => "__builtin_ia32_maskloadd", + "llvm.x86.avx2.maskload.d.256" => "__builtin_ia32_maskloadd256", + "llvm.x86.avx2.maskload.q" => "__builtin_ia32_maskloadq", + "llvm.x86.avx2.maskload.q.256" => "__builtin_ia32_maskloadq256", + "llvm.x86.avx2.maskstore.d" => "__builtin_ia32_maskstored", + "llvm.x86.avx2.maskstore.d.256" => "__builtin_ia32_maskstored256", + "llvm.x86.avx2.maskstore.q" => "__builtin_ia32_maskstoreq", + "llvm.x86.avx2.maskstore.q.256" => "__builtin_ia32_maskstoreq256", + "llvm.x86.avx2.movntdqa" => "__builtin_ia32_movntdqa256", + "llvm.x86.avx2.mpsadbw" => "__builtin_ia32_mpsadbw256", + "llvm.x86.avx2.pabs.b" => "__builtin_ia32_pabsb256", + "llvm.x86.avx2.pabs.d" => "__builtin_ia32_pabsd256", + "llvm.x86.avx2.pabs.w" => "__builtin_ia32_pabsw256", + "llvm.x86.avx2.packssdw" => "__builtin_ia32_packssdw256", + "llvm.x86.avx2.packsswb" => "__builtin_ia32_packsswb256", + "llvm.x86.avx2.packusdw" => "__builtin_ia32_packusdw256", + "llvm.x86.avx2.packuswb" => "__builtin_ia32_packuswb256", + "llvm.x86.avx2.padds.b" => "__builtin_ia32_paddsb256", + "llvm.x86.avx2.padds.w" => "__builtin_ia32_paddsw256", + "llvm.x86.avx2.paddus.b" => "__builtin_ia32_paddusb256", + "llvm.x86.avx2.paddus.w" => "__builtin_ia32_paddusw256", + "llvm.x86.avx2.pavg.b" => "__builtin_ia32_pavgb256", + "llvm.x86.avx2.pavg.w" => 
"__builtin_ia32_pavgw256", + "llvm.x86.avx2.pblendd.128" => "__builtin_ia32_pblendd128", + "llvm.x86.avx2.pblendd.256" => "__builtin_ia32_pblendd256", + "llvm.x86.avx2.pblendvb" => "__builtin_ia32_pblendvb256", + "llvm.x86.avx2.pblendw" => "__builtin_ia32_pblendw256", + "llvm.x86.avx2.pbroadcastb.128" => "__builtin_ia32_pbroadcastb128", + "llvm.x86.avx2.pbroadcastb.256" => "__builtin_ia32_pbroadcastb256", + "llvm.x86.avx2.pbroadcastd.128" => "__builtin_ia32_pbroadcastd128", + "llvm.x86.avx2.pbroadcastd.256" => "__builtin_ia32_pbroadcastd256", + "llvm.x86.avx2.pbroadcastq.128" => "__builtin_ia32_pbroadcastq128", + "llvm.x86.avx2.pbroadcastq.256" => "__builtin_ia32_pbroadcastq256", + "llvm.x86.avx2.pbroadcastw.128" => "__builtin_ia32_pbroadcastw128", + "llvm.x86.avx2.pbroadcastw.256" => "__builtin_ia32_pbroadcastw256", + "llvm.x86.avx2.permd" => "__builtin_ia32_permvarsi256", + "llvm.x86.avx2.permps" => "__builtin_ia32_permvarsf256", + "llvm.x86.avx2.phadd.d" => "__builtin_ia32_phaddd256", + "llvm.x86.avx2.phadd.sw" => "__builtin_ia32_phaddsw256", + "llvm.x86.avx2.phadd.w" => "__builtin_ia32_phaddw256", + "llvm.x86.avx2.phsub.d" => "__builtin_ia32_phsubd256", + "llvm.x86.avx2.phsub.sw" => "__builtin_ia32_phsubsw256", + "llvm.x86.avx2.phsub.w" => "__builtin_ia32_phsubw256", + "llvm.x86.avx2.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw256", + "llvm.x86.avx2.pmadd.wd" => "__builtin_ia32_pmaddwd256", + "llvm.x86.avx2.pmaxs.b" => "__builtin_ia32_pmaxsb256", + "llvm.x86.avx2.pmaxs.d" => "__builtin_ia32_pmaxsd256", + "llvm.x86.avx2.pmaxs.w" => "__builtin_ia32_pmaxsw256", + "llvm.x86.avx2.pmaxu.b" => "__builtin_ia32_pmaxub256", + "llvm.x86.avx2.pmaxu.d" => "__builtin_ia32_pmaxud256", + "llvm.x86.avx2.pmaxu.w" => "__builtin_ia32_pmaxuw256", + "llvm.x86.avx2.pmins.b" => "__builtin_ia32_pminsb256", + "llvm.x86.avx2.pmins.d" => "__builtin_ia32_pminsd256", + "llvm.x86.avx2.pmins.w" => "__builtin_ia32_pminsw256", + "llvm.x86.avx2.pminu.b" => "__builtin_ia32_pminub256", + "llvm.x86.avx2.pminu.d" => "__builtin_ia32_pminud256", + "llvm.x86.avx2.pminu.w" => "__builtin_ia32_pminuw256", + "llvm.x86.avx2.pmovmskb" => "__builtin_ia32_pmovmskb256", + "llvm.x86.avx2.pmovsxbd" => "__builtin_ia32_pmovsxbd256", + "llvm.x86.avx2.pmovsxbq" => "__builtin_ia32_pmovsxbq256", + "llvm.x86.avx2.pmovsxbw" => "__builtin_ia32_pmovsxbw256", + "llvm.x86.avx2.pmovsxdq" => "__builtin_ia32_pmovsxdq256", + "llvm.x86.avx2.pmovsxwd" => "__builtin_ia32_pmovsxwd256", + "llvm.x86.avx2.pmovsxwq" => "__builtin_ia32_pmovsxwq256", + "llvm.x86.avx2.pmovzxbd" => "__builtin_ia32_pmovzxbd256", + "llvm.x86.avx2.pmovzxbq" => "__builtin_ia32_pmovzxbq256", + "llvm.x86.avx2.pmovzxbw" => "__builtin_ia32_pmovzxbw256", + "llvm.x86.avx2.pmovzxdq" => "__builtin_ia32_pmovzxdq256", + "llvm.x86.avx2.pmovzxwd" => "__builtin_ia32_pmovzxwd256", + "llvm.x86.avx2.pmovzxwq" => "__builtin_ia32_pmovzxwq256", + "llvm.x86.avx2.pmul.dq" => "__builtin_ia32_pmuldq256", + "llvm.x86.avx2.pmul.hr.sw" => "__builtin_ia32_pmulhrsw256", + "llvm.x86.avx2.pmulh.w" => "__builtin_ia32_pmulhw256", + "llvm.x86.avx2.pmulhu.w" => "__builtin_ia32_pmulhuw256", + "llvm.x86.avx2.pmulu.dq" => "__builtin_ia32_pmuludq256", + "llvm.x86.avx2.psad.bw" => "__builtin_ia32_psadbw256", + "llvm.x86.avx2.pshuf.b" => "__builtin_ia32_pshufb256", + "llvm.x86.avx2.psign.b" => "__builtin_ia32_psignb256", + "llvm.x86.avx2.psign.d" => "__builtin_ia32_psignd256", + "llvm.x86.avx2.psign.w" => "__builtin_ia32_psignw256", + "llvm.x86.avx2.psll.d" => "__builtin_ia32_pslld256", + "llvm.x86.avx2.psll.dq" => 
"__builtin_ia32_pslldqi256", + "llvm.x86.avx2.psll.dq.bs" => "__builtin_ia32_pslldqi256_byteshift", + "llvm.x86.avx2.psll.q" => "__builtin_ia32_psllq256", + "llvm.x86.avx2.psll.w" => "__builtin_ia32_psllw256", + "llvm.x86.avx2.pslli.d" => "__builtin_ia32_pslldi256", + "llvm.x86.avx2.pslli.q" => "__builtin_ia32_psllqi256", + "llvm.x86.avx2.pslli.w" => "__builtin_ia32_psllwi256", + "llvm.x86.avx2.psllv.d" => "__builtin_ia32_psllv4si", + "llvm.x86.avx2.psllv.d.256" => "__builtin_ia32_psllv8si", + "llvm.x86.avx2.psllv.q" => "__builtin_ia32_psllv2di", + "llvm.x86.avx2.psllv.q.256" => "__builtin_ia32_psllv4di", + "llvm.x86.avx2.psra.d" => "__builtin_ia32_psrad256", + "llvm.x86.avx2.psra.w" => "__builtin_ia32_psraw256", + "llvm.x86.avx2.psrai.d" => "__builtin_ia32_psradi256", + "llvm.x86.avx2.psrai.w" => "__builtin_ia32_psrawi256", + "llvm.x86.avx2.psrav.d" => "__builtin_ia32_psrav4si", + "llvm.x86.avx2.psrav.d.256" => "__builtin_ia32_psrav8si", + "llvm.x86.avx2.psrl.d" => "__builtin_ia32_psrld256", + "llvm.x86.avx2.psrl.dq" => "__builtin_ia32_psrldqi256", + "llvm.x86.avx2.psrl.dq.bs" => "__builtin_ia32_psrldqi256_byteshift", + "llvm.x86.avx2.psrl.q" => "__builtin_ia32_psrlq256", + "llvm.x86.avx2.psrl.w" => "__builtin_ia32_psrlw256", + "llvm.x86.avx2.psrli.d" => "__builtin_ia32_psrldi256", + "llvm.x86.avx2.psrli.q" => "__builtin_ia32_psrlqi256", + "llvm.x86.avx2.psrli.w" => "__builtin_ia32_psrlwi256", + "llvm.x86.avx2.psrlv.d" => "__builtin_ia32_psrlv4si", + "llvm.x86.avx2.psrlv.d.256" => "__builtin_ia32_psrlv8si", + "llvm.x86.avx2.psrlv.q" => "__builtin_ia32_psrlv2di", + "llvm.x86.avx2.psrlv.q.256" => "__builtin_ia32_psrlv4di", + "llvm.x86.avx2.psubs.b" => "__builtin_ia32_psubsb256", + "llvm.x86.avx2.psubs.w" => "__builtin_ia32_psubsw256", + "llvm.x86.avx2.psubus.b" => "__builtin_ia32_psubusb256", + "llvm.x86.avx2.psubus.w" => "__builtin_ia32_psubusw256", + "llvm.x86.avx2.vbroadcast.sd.pd.256" => "__builtin_ia32_vbroadcastsd_pd256", + "llvm.x86.avx2.vbroadcast.ss.ps" => "__builtin_ia32_vbroadcastss_ps", + "llvm.x86.avx2.vbroadcast.ss.ps.256" => "__builtin_ia32_vbroadcastss_ps256", + "llvm.x86.avx2.vextracti128" => "__builtin_ia32_extract128i256", + "llvm.x86.avx2.vinserti128" => "__builtin_ia32_insert128i256", + "llvm.x86.avx2.vpdpbssd.128" => "__builtin_ia32_vpdpbssd128", + "llvm.x86.avx2.vpdpbssd.256" => "__builtin_ia32_vpdpbssd256", + "llvm.x86.avx2.vpdpbssds.128" => "__builtin_ia32_vpdpbssds128", + "llvm.x86.avx2.vpdpbssds.256" => "__builtin_ia32_vpdpbssds256", + "llvm.x86.avx2.vpdpbsud.128" => "__builtin_ia32_vpdpbsud128", + "llvm.x86.avx2.vpdpbsud.256" => "__builtin_ia32_vpdpbsud256", + "llvm.x86.avx2.vpdpbsuds.128" => "__builtin_ia32_vpdpbsuds128", + "llvm.x86.avx2.vpdpbsuds.256" => "__builtin_ia32_vpdpbsuds256", + "llvm.x86.avx2.vpdpbuud.128" => "__builtin_ia32_vpdpbuud128", + "llvm.x86.avx2.vpdpbuud.256" => "__builtin_ia32_vpdpbuud256", + "llvm.x86.avx2.vpdpbuuds.128" => "__builtin_ia32_vpdpbuuds128", + "llvm.x86.avx2.vpdpbuuds.256" => "__builtin_ia32_vpdpbuuds256", + "llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256", + "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512", + "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512", + "llvm.x86.avx512.broadcastmb.128" => "__builtin_ia32_broadcastmb128", + "llvm.x86.avx512.broadcastmb.256" => "__builtin_ia32_broadcastmb256", + "llvm.x86.avx512.broadcastmb.512" => "__builtin_ia32_broadcastmb512", + "llvm.x86.avx512.broadcastmw.128" => "__builtin_ia32_broadcastmw128", + "llvm.x86.avx512.broadcastmw.256" => 
"__builtin_ia32_broadcastmw256", + "llvm.x86.avx512.broadcastmw.512" => "__builtin_ia32_broadcastmw512", + "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128", + "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256", + "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512", + "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128", + "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256", + "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512", + "llvm.x86.avx512.cvtb2mask.128" => "__builtin_ia32_cvtb2mask128", + "llvm.x86.avx512.cvtb2mask.256" => "__builtin_ia32_cvtb2mask256", + "llvm.x86.avx512.cvtb2mask.512" => "__builtin_ia32_cvtb2mask512", + "llvm.x86.avx512.cvtd2mask.128" => "__builtin_ia32_cvtd2mask128", + "llvm.x86.avx512.cvtd2mask.256" => "__builtin_ia32_cvtd2mask256", + "llvm.x86.avx512.cvtd2mask.512" => "__builtin_ia32_cvtd2mask512", + "llvm.x86.avx512.cvtmask2b.128" => "__builtin_ia32_cvtmask2b128", + "llvm.x86.avx512.cvtmask2b.256" => "__builtin_ia32_cvtmask2b256", + "llvm.x86.avx512.cvtmask2b.512" => "__builtin_ia32_cvtmask2b512", + "llvm.x86.avx512.cvtmask2d.128" => "__builtin_ia32_cvtmask2d128", + "llvm.x86.avx512.cvtmask2d.256" => "__builtin_ia32_cvtmask2d256", + "llvm.x86.avx512.cvtmask2d.512" => "__builtin_ia32_cvtmask2d512", + "llvm.x86.avx512.cvtmask2q.128" => "__builtin_ia32_cvtmask2q128", + "llvm.x86.avx512.cvtmask2q.256" => "__builtin_ia32_cvtmask2q256", + "llvm.x86.avx512.cvtmask2q.512" => "__builtin_ia32_cvtmask2q512", + "llvm.x86.avx512.cvtmask2w.128" => "__builtin_ia32_cvtmask2w128", + "llvm.x86.avx512.cvtmask2w.256" => "__builtin_ia32_cvtmask2w256", + "llvm.x86.avx512.cvtmask2w.512" => "__builtin_ia32_cvtmask2w512", + "llvm.x86.avx512.cvtq2mask.128" => "__builtin_ia32_cvtq2mask128", + "llvm.x86.avx512.cvtq2mask.256" => "__builtin_ia32_cvtq2mask256", + "llvm.x86.avx512.cvtq2mask.512" => "__builtin_ia32_cvtq2mask512", + "llvm.x86.avx512.cvtsd2usi" => "__builtin_ia32_cvtsd2usi", + "llvm.x86.avx512.cvtsd2usi64" => "__builtin_ia32_cvtsd2usi64", + "llvm.x86.avx512.cvtsi2sd32" => "__builtin_ia32_cvtsi2sd32", + "llvm.x86.avx512.cvtsi2sd64" => "__builtin_ia32_cvtsi2sd64", + "llvm.x86.avx512.cvtsi2ss32" => "__builtin_ia32_cvtsi2ss32", + "llvm.x86.avx512.cvtsi2ss64" => "__builtin_ia32_cvtsi2ss64", + "llvm.x86.avx512.cvtss2usi" => "__builtin_ia32_cvtss2usi", + "llvm.x86.avx512.cvtss2usi64" => "__builtin_ia32_cvtss2usi64", + "llvm.x86.avx512.cvttsd2si" => "__builtin_ia32_vcvttsd2si32", + "llvm.x86.avx512.cvttsd2si64" => "__builtin_ia32_vcvttsd2si64", + "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_vcvttsd2usi32", + // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_cvttsd2usi", + "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_vcvttsd2usi64", + // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_cvttsd2usi64", + "llvm.x86.avx512.cvttss2si" => "__builtin_ia32_vcvttss2si32", + "llvm.x86.avx512.cvttss2si64" => "__builtin_ia32_vcvttss2si64", + "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_vcvttss2usi32", + // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_cvttss2usi", + "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_vcvttss2usi64", + // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_cvttss2usi64", + "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd", + // [DUPLICATE]: "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd32", + "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss32", + // [DUPLICATE]: 
"llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss", + "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi2sd64", + // [DUPLICATE]: "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi642sd", + "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi2ss64", + // [DUPLICATE]: "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi642ss", + "llvm.x86.avx512.cvtw2mask.128" => "__builtin_ia32_cvtw2mask128", + "llvm.x86.avx512.cvtw2mask.256" => "__builtin_ia32_cvtw2mask256", + "llvm.x86.avx512.cvtw2mask.512" => "__builtin_ia32_cvtw2mask512", + "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128", + "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256", + "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512", + "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512", + "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512", + "llvm.x86.avx512.exp2.pd" => "__builtin_ia32_exp2pd_mask", + "llvm.x86.avx512.exp2.ps" => "__builtin_ia32_exp2ps_mask", + "llvm.x86.avx512.gather.dpd.512" => "__builtin_ia32_gathersiv8df", + "llvm.x86.avx512.gather.dpi.512" => "__builtin_ia32_gathersiv16si", + "llvm.x86.avx512.gather.dpq.512" => "__builtin_ia32_gathersiv8di", + "llvm.x86.avx512.gather.dps.512" => "__builtin_ia32_gathersiv16sf", + "llvm.x86.avx512.gather.qpd.512" => "__builtin_ia32_gatherdiv8df", + "llvm.x86.avx512.gather.qpi.512" => "__builtin_ia32_gatherdiv16si", + "llvm.x86.avx512.gather.qpq.512" => "__builtin_ia32_gatherdiv8di", + "llvm.x86.avx512.gather.qps.512" => "__builtin_ia32_gatherdiv16sf", + "llvm.x86.avx512.gather3div2.df" => "__builtin_ia32_gather3div2df", + "llvm.x86.avx512.gather3div2.di" => "__builtin_ia32_gather3div2di", + "llvm.x86.avx512.gather3div4.df" => "__builtin_ia32_gather3div4df", + "llvm.x86.avx512.gather3div4.di" => "__builtin_ia32_gather3div4di", + "llvm.x86.avx512.gather3div4.sf" => "__builtin_ia32_gather3div4sf", + "llvm.x86.avx512.gather3div4.si" => "__builtin_ia32_gather3div4si", + "llvm.x86.avx512.gather3div8.sf" => "__builtin_ia32_gather3div8sf", + "llvm.x86.avx512.gather3div8.si" => "__builtin_ia32_gather3div8si", + "llvm.x86.avx512.gather3siv2.df" => "__builtin_ia32_gather3siv2df", + "llvm.x86.avx512.gather3siv2.di" => "__builtin_ia32_gather3siv2di", + "llvm.x86.avx512.gather3siv4.df" => "__builtin_ia32_gather3siv4df", + "llvm.x86.avx512.gather3siv4.di" => "__builtin_ia32_gather3siv4di", + "llvm.x86.avx512.gather3siv4.sf" => "__builtin_ia32_gather3siv4sf", + "llvm.x86.avx512.gather3siv4.si" => "__builtin_ia32_gather3siv4si", + "llvm.x86.avx512.gather3siv8.sf" => "__builtin_ia32_gather3siv8sf", + "llvm.x86.avx512.gather3siv8.si" => "__builtin_ia32_gather3siv8si", + "llvm.x86.avx512.gatherpf.dpd.512" => "__builtin_ia32_gatherpfdpd", + "llvm.x86.avx512.gatherpf.dps.512" => "__builtin_ia32_gatherpfdps", + "llvm.x86.avx512.gatherpf.qpd.512" => "__builtin_ia32_gatherpfqpd", + "llvm.x86.avx512.gatherpf.qps.512" => "__builtin_ia32_gatherpfqps", + "llvm.x86.avx512.kand.w" => "__builtin_ia32_kandhi", + "llvm.x86.avx512.kandn.w" => "__builtin_ia32_kandnhi", + "llvm.x86.avx512.knot.w" => "__builtin_ia32_knothi", + "llvm.x86.avx512.kor.w" => "__builtin_ia32_korhi", + "llvm.x86.avx512.kortestc.w" => "__builtin_ia32_kortestchi", + "llvm.x86.avx512.kortestz.w" => "__builtin_ia32_kortestzhi", + "llvm.x86.avx512.kunpck.bw" => "__builtin_ia32_kunpckhi", + "llvm.x86.avx512.kunpck.dq" => "__builtin_ia32_kunpckdi", + "llvm.x86.avx512.kunpck.wd" => "__builtin_ia32_kunpcksi", + "llvm.x86.avx512.kxnor.w" => "__builtin_ia32_kxnorhi", + 
"llvm.x86.avx512.kxor.w" => "__builtin_ia32_kxorhi", + "llvm.x86.avx512.mask.add.pd.128" => "__builtin_ia32_addpd128_mask", + "llvm.x86.avx512.mask.add.pd.256" => "__builtin_ia32_addpd256_mask", + "llvm.x86.avx512.mask.add.pd.512" => "__builtin_ia32_addpd512_mask", + "llvm.x86.avx512.mask.add.ps.128" => "__builtin_ia32_addps128_mask", + "llvm.x86.avx512.mask.add.ps.256" => "__builtin_ia32_addps256_mask", + "llvm.x86.avx512.mask.add.ps.512" => "__builtin_ia32_addps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask", + "llvm.x86.avx512.mask.and.pd.128" => "__builtin_ia32_andpd128_mask", + "llvm.x86.avx512.mask.and.pd.256" => "__builtin_ia32_andpd256_mask", + "llvm.x86.avx512.mask.and.pd.512" => "__builtin_ia32_andpd512_mask", + "llvm.x86.avx512.mask.and.ps.128" => "__builtin_ia32_andps128_mask", + "llvm.x86.avx512.mask.and.ps.256" => "__builtin_ia32_andps256_mask", + "llvm.x86.avx512.mask.and.ps.512" => "__builtin_ia32_andps512_mask", + "llvm.x86.avx512.mask.andn.pd.128" => "__builtin_ia32_andnpd128_mask", + "llvm.x86.avx512.mask.andn.pd.256" => "__builtin_ia32_andnpd256_mask", + "llvm.x86.avx512.mask.andn.pd.512" => "__builtin_ia32_andnpd512_mask", + "llvm.x86.avx512.mask.andn.ps.128" => "__builtin_ia32_andnps128_mask", + "llvm.x86.avx512.mask.andn.ps.256" => "__builtin_ia32_andnps256_mask", + "llvm.x86.avx512.mask.andn.ps.512" => "__builtin_ia32_andnps512_mask", + "llvm.x86.avx512.mask.blend.d.512" => "__builtin_ia32_blendmd_512_mask", + "llvm.x86.avx512.mask.blend.pd.512" => "__builtin_ia32_blendmpd_512_mask", + "llvm.x86.avx512.mask.blend.ps.512" => "__builtin_ia32_blendmps_512_mask", + "llvm.x86.avx512.mask.blend.q.512" => "__builtin_ia32_blendmq_512_mask", + "llvm.x86.avx512.mask.broadcastf32x2.256" => "__builtin_ia32_broadcastf32x2_256_mask", + "llvm.x86.avx512.mask.broadcastf32x2.512" => "__builtin_ia32_broadcastf32x2_512_mask", + "llvm.x86.avx512.mask.broadcastf32x4.256" => "__builtin_ia32_broadcastf32x4_256_mask", + "llvm.x86.avx512.mask.broadcastf32x4.512" => "__builtin_ia32_broadcastf32x4_512", + "llvm.x86.avx512.mask.broadcastf32x8.512" => "__builtin_ia32_broadcastf32x8_512_mask", + "llvm.x86.avx512.mask.broadcastf64x2.256" => "__builtin_ia32_broadcastf64x2_256_mask", + "llvm.x86.avx512.mask.broadcastf64x2.512" => "__builtin_ia32_broadcastf64x2_512_mask", + "llvm.x86.avx512.mask.broadcastf64x4.512" => "__builtin_ia32_broadcastf64x4_512", + "llvm.x86.avx512.mask.broadcasti32x2.128" => "__builtin_ia32_broadcasti32x2_128_mask", + "llvm.x86.avx512.mask.broadcasti32x2.256" => "__builtin_ia32_broadcasti32x2_256_mask", + "llvm.x86.avx512.mask.broadcasti32x2.512" => "__builtin_ia32_broadcasti32x2_512_mask", + "llvm.x86.avx512.mask.broadcasti32x4.256" => "__builtin_ia32_broadcasti32x4_256_mask", + "llvm.x86.avx512.mask.broadcasti32x4.512" => "__builtin_ia32_broadcasti32x4_512", + "llvm.x86.avx512.mask.broadcasti32x8.512" => "__builtin_ia32_broadcasti32x8_512_mask", + "llvm.x86.avx512.mask.broadcasti64x2.256" => "__builtin_ia32_broadcasti64x2_256_mask", + "llvm.x86.avx512.mask.broadcasti64x2.512" => "__builtin_ia32_broadcasti64x2_512_mask", + "llvm.x86.avx512.mask.broadcasti64x4.512" => "__builtin_ia32_broadcasti64x4_512", + "llvm.x86.avx512.mask.cmp.pd.128" => "__builtin_ia32_cmppd128_mask", + "llvm.x86.avx512.mask.cmp.pd.256" => "__builtin_ia32_cmppd256_mask", + "llvm.x86.avx512.mask.cmp.pd.512" => "__builtin_ia32_cmppd512_mask", 
+ "llvm.x86.avx512.mask.cmp.ps.128" => "__builtin_ia32_cmpps128_mask", + "llvm.x86.avx512.mask.cmp.ps.256" => "__builtin_ia32_cmpps256_mask", + "llvm.x86.avx512.mask.cmp.ps.512" => "__builtin_ia32_cmpps512_mask", + "llvm.x86.avx512.mask.cmp.sd" => "__builtin_ia32_cmpsd_mask", + "llvm.x86.avx512.mask.cmp.ss" => "__builtin_ia32_cmpss_mask", + "llvm.x86.avx512.mask.compress.d.128" => "__builtin_ia32_compresssi128_mask", + "llvm.x86.avx512.mask.compress.d.256" => "__builtin_ia32_compresssi256_mask", + "llvm.x86.avx512.mask.compress.d.512" => "__builtin_ia32_compresssi512_mask", + "llvm.x86.avx512.mask.compress.pd.128" => "__builtin_ia32_compressdf128_mask", + "llvm.x86.avx512.mask.compress.pd.256" => "__builtin_ia32_compressdf256_mask", + "llvm.x86.avx512.mask.compress.pd.512" => "__builtin_ia32_compressdf512_mask", + "llvm.x86.avx512.mask.compress.ps.128" => "__builtin_ia32_compresssf128_mask", + "llvm.x86.avx512.mask.compress.ps.256" => "__builtin_ia32_compresssf256_mask", + "llvm.x86.avx512.mask.compress.ps.512" => "__builtin_ia32_compresssf512_mask", + "llvm.x86.avx512.mask.compress.q.128" => "__builtin_ia32_compressdi128_mask", + "llvm.x86.avx512.mask.compress.q.256" => "__builtin_ia32_compressdi256_mask", + "llvm.x86.avx512.mask.compress.q.512" => "__builtin_ia32_compressdi512_mask", + "llvm.x86.avx512.mask.compress.store.d.128" => "__builtin_ia32_compressstoresi128_mask", + "llvm.x86.avx512.mask.compress.store.d.256" => "__builtin_ia32_compressstoresi256_mask", + "llvm.x86.avx512.mask.compress.store.d.512" => "__builtin_ia32_compressstoresi512_mask", + "llvm.x86.avx512.mask.compress.store.pd.128" => "__builtin_ia32_compressstoredf128_mask", + "llvm.x86.avx512.mask.compress.store.pd.256" => "__builtin_ia32_compressstoredf256_mask", + "llvm.x86.avx512.mask.compress.store.pd.512" => "__builtin_ia32_compressstoredf512_mask", + "llvm.x86.avx512.mask.compress.store.ps.128" => "__builtin_ia32_compressstoresf128_mask", + "llvm.x86.avx512.mask.compress.store.ps.256" => "__builtin_ia32_compressstoresf256_mask", + "llvm.x86.avx512.mask.compress.store.ps.512" => "__builtin_ia32_compressstoresf512_mask", + "llvm.x86.avx512.mask.compress.store.q.128" => "__builtin_ia32_compressstoredi128_mask", + "llvm.x86.avx512.mask.compress.store.q.256" => "__builtin_ia32_compressstoredi256_mask", + "llvm.x86.avx512.mask.compress.store.q.512" => "__builtin_ia32_compressstoredi512_mask", + "llvm.x86.avx512.mask.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask", + "llvm.x86.avx512.mask.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask", + "llvm.x86.avx512.mask.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask", + "llvm.x86.avx512.mask.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask", + "llvm.x86.avx512.mask.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask", + "llvm.x86.avx512.mask.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask", + "llvm.x86.avx512.mask.cvtdq2pd.128" => "__builtin_ia32_cvtdq2pd128_mask", + "llvm.x86.avx512.mask.cvtdq2pd.256" => "__builtin_ia32_cvtdq2pd256_mask", + "llvm.x86.avx512.mask.cvtdq2pd.512" => "__builtin_ia32_cvtdq2pd512_mask", + "llvm.x86.avx512.mask.cvtdq2ps.128" => "__builtin_ia32_cvtdq2ps128_mask", + "llvm.x86.avx512.mask.cvtdq2ps.256" => "__builtin_ia32_cvtdq2ps256_mask", + "llvm.x86.avx512.mask.cvtdq2ps.512" => "__builtin_ia32_cvtdq2ps512_mask", + "llvm.x86.avx512.mask.cvtpd2dq.128" => "__builtin_ia32_cvtpd2dq128_mask", + "llvm.x86.avx512.mask.cvtpd2dq.256" => "__builtin_ia32_cvtpd2dq256_mask", + "llvm.x86.avx512.mask.cvtpd2dq.512" => 
"__builtin_ia32_cvtpd2dq512_mask", + "llvm.x86.avx512.mask.cvtpd2ps" => "__builtin_ia32_cvtpd2ps_mask", + "llvm.x86.avx512.mask.cvtpd2ps.256" => "__builtin_ia32_cvtpd2ps256_mask", + "llvm.x86.avx512.mask.cvtpd2ps.512" => "__builtin_ia32_cvtpd2ps512_mask", + "llvm.x86.avx512.mask.cvtpd2qq.128" => "__builtin_ia32_cvtpd2qq128_mask", + "llvm.x86.avx512.mask.cvtpd2qq.256" => "__builtin_ia32_cvtpd2qq256_mask", + "llvm.x86.avx512.mask.cvtpd2qq.512" => "__builtin_ia32_cvtpd2qq512_mask", + "llvm.x86.avx512.mask.cvtpd2udq.128" => "__builtin_ia32_cvtpd2udq128_mask", + "llvm.x86.avx512.mask.cvtpd2udq.256" => "__builtin_ia32_cvtpd2udq256_mask", + "llvm.x86.avx512.mask.cvtpd2udq.512" => "__builtin_ia32_cvtpd2udq512_mask", + "llvm.x86.avx512.mask.cvtpd2uqq.128" => "__builtin_ia32_cvtpd2uqq128_mask", + "llvm.x86.avx512.mask.cvtpd2uqq.256" => "__builtin_ia32_cvtpd2uqq256_mask", + "llvm.x86.avx512.mask.cvtpd2uqq.512" => "__builtin_ia32_cvtpd2uqq512_mask", + "llvm.x86.avx512.mask.cvtps2dq.128" => "__builtin_ia32_cvtps2dq128_mask", + "llvm.x86.avx512.mask.cvtps2dq.256" => "__builtin_ia32_cvtps2dq256_mask", + "llvm.x86.avx512.mask.cvtps2dq.512" => "__builtin_ia32_cvtps2dq512_mask", + "llvm.x86.avx512.mask.cvtps2pd.128" => "__builtin_ia32_cvtps2pd128_mask", + "llvm.x86.avx512.mask.cvtps2pd.256" => "__builtin_ia32_cvtps2pd256_mask", + "llvm.x86.avx512.mask.cvtps2pd.512" => "__builtin_ia32_cvtps2pd512_mask", + "llvm.x86.avx512.mask.cvtps2qq.128" => "__builtin_ia32_cvtps2qq128_mask", + "llvm.x86.avx512.mask.cvtps2qq.256" => "__builtin_ia32_cvtps2qq256_mask", + "llvm.x86.avx512.mask.cvtps2qq.512" => "__builtin_ia32_cvtps2qq512_mask", + "llvm.x86.avx512.mask.cvtps2udq.128" => "__builtin_ia32_cvtps2udq128_mask", + "llvm.x86.avx512.mask.cvtps2udq.256" => "__builtin_ia32_cvtps2udq256_mask", + "llvm.x86.avx512.mask.cvtps2udq.512" => "__builtin_ia32_cvtps2udq512_mask", + "llvm.x86.avx512.mask.cvtps2uqq.128" => "__builtin_ia32_cvtps2uqq128_mask", + "llvm.x86.avx512.mask.cvtps2uqq.256" => "__builtin_ia32_cvtps2uqq256_mask", + "llvm.x86.avx512.mask.cvtps2uqq.512" => "__builtin_ia32_cvtps2uqq512_mask", + "llvm.x86.avx512.mask.cvtqq2pd.128" => "__builtin_ia32_cvtqq2pd128_mask", + "llvm.x86.avx512.mask.cvtqq2pd.256" => "__builtin_ia32_cvtqq2pd256_mask", + "llvm.x86.avx512.mask.cvtqq2pd.512" => "__builtin_ia32_cvtqq2pd512_mask", + "llvm.x86.avx512.mask.cvtqq2ps.128" => "__builtin_ia32_cvtqq2ps128_mask", + "llvm.x86.avx512.mask.cvtqq2ps.256" => "__builtin_ia32_cvtqq2ps256_mask", + "llvm.x86.avx512.mask.cvtqq2ps.512" => "__builtin_ia32_cvtqq2ps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask", + "llvm.x86.avx512.mask.cvttpd2dq.128" => "__builtin_ia32_cvttpd2dq128_mask", + "llvm.x86.avx512.mask.cvttpd2dq.256" => "__builtin_ia32_cvttpd2dq256_mask", + "llvm.x86.avx512.mask.cvttpd2dq.512" => "__builtin_ia32_cvttpd2dq512_mask", + "llvm.x86.avx512.mask.cvttpd2qq.128" => "__builtin_ia32_cvttpd2qq128_mask", + "llvm.x86.avx512.mask.cvttpd2qq.256" => "__builtin_ia32_cvttpd2qq256_mask", + "llvm.x86.avx512.mask.cvttpd2qq.512" => "__builtin_ia32_cvttpd2qq512_mask", + "llvm.x86.avx512.mask.cvttpd2udq.128" => "__builtin_ia32_cvttpd2udq128_mask", + "llvm.x86.avx512.mask.cvttpd2udq.256" => "__builtin_ia32_cvttpd2udq256_mask", + "llvm.x86.avx512.mask.cvttpd2udq.512" => "__builtin_ia32_cvttpd2udq512_mask", + "llvm.x86.avx512.mask.cvttpd2uqq.128" => 
"__builtin_ia32_cvttpd2uqq128_mask", + "llvm.x86.avx512.mask.cvttpd2uqq.256" => "__builtin_ia32_cvttpd2uqq256_mask", + "llvm.x86.avx512.mask.cvttpd2uqq.512" => "__builtin_ia32_cvttpd2uqq512_mask", + "llvm.x86.avx512.mask.cvttps2dq.128" => "__builtin_ia32_cvttps2dq128_mask", + "llvm.x86.avx512.mask.cvttps2dq.256" => "__builtin_ia32_cvttps2dq256_mask", + "llvm.x86.avx512.mask.cvttps2dq.512" => "__builtin_ia32_cvttps2dq512_mask", + "llvm.x86.avx512.mask.cvttps2qq.128" => "__builtin_ia32_cvttps2qq128_mask", + "llvm.x86.avx512.mask.cvttps2qq.256" => "__builtin_ia32_cvttps2qq256_mask", + "llvm.x86.avx512.mask.cvttps2qq.512" => "__builtin_ia32_cvttps2qq512_mask", + "llvm.x86.avx512.mask.cvttps2udq.128" => "__builtin_ia32_cvttps2udq128_mask", + "llvm.x86.avx512.mask.cvttps2udq.256" => "__builtin_ia32_cvttps2udq256_mask", + "llvm.x86.avx512.mask.cvttps2udq.512" => "__builtin_ia32_cvttps2udq512_mask", + "llvm.x86.avx512.mask.cvttps2uqq.128" => "__builtin_ia32_cvttps2uqq128_mask", + "llvm.x86.avx512.mask.cvttps2uqq.256" => "__builtin_ia32_cvttps2uqq256_mask", + "llvm.x86.avx512.mask.cvttps2uqq.512" => "__builtin_ia32_cvttps2uqq512_mask", + "llvm.x86.avx512.mask.cvtudq2pd.128" => "__builtin_ia32_cvtudq2pd128_mask", + "llvm.x86.avx512.mask.cvtudq2pd.256" => "__builtin_ia32_cvtudq2pd256_mask", + "llvm.x86.avx512.mask.cvtudq2pd.512" => "__builtin_ia32_cvtudq2pd512_mask", + "llvm.x86.avx512.mask.cvtudq2ps.128" => "__builtin_ia32_cvtudq2ps128_mask", + "llvm.x86.avx512.mask.cvtudq2ps.256" => "__builtin_ia32_cvtudq2ps256_mask", + "llvm.x86.avx512.mask.cvtudq2ps.512" => "__builtin_ia32_cvtudq2ps512_mask", + "llvm.x86.avx512.mask.cvtuqq2pd.128" => "__builtin_ia32_cvtuqq2pd128_mask", + "llvm.x86.avx512.mask.cvtuqq2pd.256" => "__builtin_ia32_cvtuqq2pd256_mask", + "llvm.x86.avx512.mask.cvtuqq2pd.512" => "__builtin_ia32_cvtuqq2pd512_mask", + "llvm.x86.avx512.mask.cvtuqq2ps.128" => "__builtin_ia32_cvtuqq2ps128_mask", + "llvm.x86.avx512.mask.cvtuqq2ps.256" => "__builtin_ia32_cvtuqq2ps256_mask", + "llvm.x86.avx512.mask.cvtuqq2ps.512" => "__builtin_ia32_cvtuqq2ps512_mask", + "llvm.x86.avx512.mask.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask", + "llvm.x86.avx512.mask.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask", + "llvm.x86.avx512.mask.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask", + "llvm.x86.avx512.mask.div.pd.128" => "__builtin_ia32_divpd_mask", + "llvm.x86.avx512.mask.div.pd.256" => "__builtin_ia32_divpd256_mask", + "llvm.x86.avx512.mask.div.pd.512" => "__builtin_ia32_divpd512_mask", + "llvm.x86.avx512.mask.div.ps.128" => "__builtin_ia32_divps_mask", + "llvm.x86.avx512.mask.div.ps.256" => "__builtin_ia32_divps256_mask", + "llvm.x86.avx512.mask.div.ps.512" => "__builtin_ia32_divps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask", + "llvm.x86.avx512.mask.expand.d.128" => "__builtin_ia32_expandsi128_mask", + "llvm.x86.avx512.mask.expand.d.256" => "__builtin_ia32_expandsi256_mask", + "llvm.x86.avx512.mask.expand.d.512" => "__builtin_ia32_expandsi512_mask", + "llvm.x86.avx512.mask.expand.load.d.128" => "__builtin_ia32_expandloadsi128_mask", + "llvm.x86.avx512.mask.expand.load.d.256" => "__builtin_ia32_expandloadsi256_mask", + "llvm.x86.avx512.mask.expand.load.d.512" => "__builtin_ia32_expandloadsi512_mask", + "llvm.x86.avx512.mask.expand.load.pd.128" => "__builtin_ia32_expandloaddf128_mask", + "llvm.x86.avx512.mask.expand.load.pd.256" => 
"__builtin_ia32_expandloaddf256_mask", + "llvm.x86.avx512.mask.expand.load.pd.512" => "__builtin_ia32_expandloaddf512_mask", + "llvm.x86.avx512.mask.expand.load.ps.128" => "__builtin_ia32_expandloadsf128_mask", + "llvm.x86.avx512.mask.expand.load.ps.256" => "__builtin_ia32_expandloadsf256_mask", + "llvm.x86.avx512.mask.expand.load.ps.512" => "__builtin_ia32_expandloadsf512_mask", + "llvm.x86.avx512.mask.expand.load.q.128" => "__builtin_ia32_expandloaddi128_mask", + "llvm.x86.avx512.mask.expand.load.q.256" => "__builtin_ia32_expandloaddi256_mask", + "llvm.x86.avx512.mask.expand.load.q.512" => "__builtin_ia32_expandloaddi512_mask", + "llvm.x86.avx512.mask.expand.pd.128" => "__builtin_ia32_expanddf128_mask", + "llvm.x86.avx512.mask.expand.pd.256" => "__builtin_ia32_expanddf256_mask", + "llvm.x86.avx512.mask.expand.pd.512" => "__builtin_ia32_expanddf512_mask", + "llvm.x86.avx512.mask.expand.ps.128" => "__builtin_ia32_expandsf128_mask", + "llvm.x86.avx512.mask.expand.ps.256" => "__builtin_ia32_expandsf256_mask", + "llvm.x86.avx512.mask.expand.ps.512" => "__builtin_ia32_expandsf512_mask", + "llvm.x86.avx512.mask.expand.q.128" => "__builtin_ia32_expanddi128_mask", + "llvm.x86.avx512.mask.expand.q.256" => "__builtin_ia32_expanddi256_mask", + "llvm.x86.avx512.mask.expand.q.512" => "__builtin_ia32_expanddi512_mask", + "llvm.x86.avx512.mask.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_mask", + "llvm.x86.avx512.mask.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_mask", + "llvm.x86.avx512.mask.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_mask", + "llvm.x86.avx512.mask.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_mask", + "llvm.x86.avx512.mask.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_mask", + "llvm.x86.avx512.mask.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_mask", + "llvm.x86.avx512.mask.fixupimm.sd" => "__builtin_ia32_fixupimmsd_mask", + "llvm.x86.avx512.mask.fixupimm.ss" => "__builtin_ia32_fixupimmss_mask", + "llvm.x86.avx512.mask.fpclass.pd.128" => "__builtin_ia32_fpclasspd128_mask", + "llvm.x86.avx512.mask.fpclass.pd.256" => "__builtin_ia32_fpclasspd256_mask", + "llvm.x86.avx512.mask.fpclass.pd.512" => "__builtin_ia32_fpclasspd512_mask", + "llvm.x86.avx512.mask.fpclass.ps.128" => "__builtin_ia32_fpclassps128_mask", + "llvm.x86.avx512.mask.fpclass.ps.256" => "__builtin_ia32_fpclassps256_mask", + "llvm.x86.avx512.mask.fpclass.ps.512" => "__builtin_ia32_fpclassps512_mask", + "llvm.x86.avx512.mask.fpclass.sd" => "__builtin_ia32_fpclasssd_mask", + "llvm.x86.avx512.mask.fpclass.ss" => "__builtin_ia32_fpclassss_mask", + "llvm.x86.avx512.mask.getexp.pd.128" => "__builtin_ia32_getexppd128_mask", + "llvm.x86.avx512.mask.getexp.pd.256" => "__builtin_ia32_getexppd256_mask", + "llvm.x86.avx512.mask.getexp.pd.512" => "__builtin_ia32_getexppd512_mask", + "llvm.x86.avx512.mask.getexp.ps.128" => "__builtin_ia32_getexpps128_mask", + "llvm.x86.avx512.mask.getexp.ps.256" => "__builtin_ia32_getexpps256_mask", + "llvm.x86.avx512.mask.getexp.ps.512" => "__builtin_ia32_getexpps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask", + "llvm.x86.avx512.mask.getmant.pd.128" => "__builtin_ia32_getmantpd128_mask", + "llvm.x86.avx512.mask.getmant.pd.256" => "__builtin_ia32_getmantpd256_mask", + "llvm.x86.avx512.mask.getmant.pd.512" => "__builtin_ia32_getmantpd512_mask", + "llvm.x86.avx512.mask.getmant.ps.128" => 
"__builtin_ia32_getmantps128_mask", + "llvm.x86.avx512.mask.getmant.ps.256" => "__builtin_ia32_getmantps256_mask", + "llvm.x86.avx512.mask.getmant.ps.512" => "__builtin_ia32_getmantps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask", + "llvm.x86.avx512.mask.insertf32x4.256" => "__builtin_ia32_insertf32x4_256_mask", + "llvm.x86.avx512.mask.insertf32x4.512" => "__builtin_ia32_insertf32x4_mask", + "llvm.x86.avx512.mask.insertf32x8.512" => "__builtin_ia32_insertf32x8_mask", + "llvm.x86.avx512.mask.insertf64x2.256" => "__builtin_ia32_insertf64x2_256_mask", + "llvm.x86.avx512.mask.insertf64x2.512" => "__builtin_ia32_insertf64x2_512_mask", + "llvm.x86.avx512.mask.insertf64x4.512" => "__builtin_ia32_insertf64x4_mask", + "llvm.x86.avx512.mask.inserti32x4.256" => "__builtin_ia32_inserti32x4_256_mask", + "llvm.x86.avx512.mask.inserti32x4.512" => "__builtin_ia32_inserti32x4_mask", + "llvm.x86.avx512.mask.inserti32x8.512" => "__builtin_ia32_inserti32x8_mask", + "llvm.x86.avx512.mask.inserti64x2.256" => "__builtin_ia32_inserti64x2_256_mask", + "llvm.x86.avx512.mask.inserti64x2.512" => "__builtin_ia32_inserti64x2_512_mask", + "llvm.x86.avx512.mask.inserti64x4.512" => "__builtin_ia32_inserti64x4_mask", + "llvm.x86.avx512.mask.loadu.d.512" => "__builtin_ia32_loaddqusi512_mask", + "llvm.x86.avx512.mask.loadu.pd.512" => "__builtin_ia32_loadupd512_mask", + "llvm.x86.avx512.mask.loadu.ps.512" => "__builtin_ia32_loadups512_mask", + "llvm.x86.avx512.mask.loadu.q.512" => "__builtin_ia32_loaddqudi512_mask", + "llvm.x86.avx512.mask.lzcnt.d.512" => "__builtin_ia32_vplzcntd_512_mask", + "llvm.x86.avx512.mask.lzcnt.q.512" => "__builtin_ia32_vplzcntq_512_mask", + "llvm.x86.avx512.mask.max.pd.128" => "__builtin_ia32_maxpd_mask", + "llvm.x86.avx512.mask.max.pd.256" => "__builtin_ia32_maxpd256_mask", + "llvm.x86.avx512.mask.max.pd.512" => "__builtin_ia32_maxpd512_mask", + "llvm.x86.avx512.mask.max.ps.128" => "__builtin_ia32_maxps_mask", + "llvm.x86.avx512.mask.max.ps.256" => "__builtin_ia32_maxps256_mask", + "llvm.x86.avx512.mask.max.ps.512" => "__builtin_ia32_maxps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask", + "llvm.x86.avx512.mask.min.pd.128" => "__builtin_ia32_minpd_mask", + "llvm.x86.avx512.mask.min.pd.256" => "__builtin_ia32_minpd256_mask", + "llvm.x86.avx512.mask.min.pd.512" => "__builtin_ia32_minpd512_mask", + "llvm.x86.avx512.mask.min.ps.128" => "__builtin_ia32_minps_mask", + "llvm.x86.avx512.mask.min.ps.256" => "__builtin_ia32_minps256_mask", + "llvm.x86.avx512.mask.min.ps.512" => "__builtin_ia32_minps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask", + "llvm.x86.avx512.mask.move.sd" => "__builtin_ia32_movsd_mask", + "llvm.x86.avx512.mask.move.ss" => "__builtin_ia32_movss_mask", + "llvm.x86.avx512.mask.mul.pd.128" => "__builtin_ia32_mulpd_mask", + "llvm.x86.avx512.mask.mul.pd.256" => "__builtin_ia32_mulpd256_mask", + "llvm.x86.avx512.mask.mul.pd.512" => "__builtin_ia32_mulpd512_mask", + "llvm.x86.avx512.mask.mul.ps.128" => "__builtin_ia32_mulps_mask", + "llvm.x86.avx512.mask.mul.ps.256" => "__builtin_ia32_mulps256_mask", + 
"llvm.x86.avx512.mask.mul.ps.512" => "__builtin_ia32_mulps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask", + "llvm.x86.avx512.mask.or.pd.128" => "__builtin_ia32_orpd128_mask", + "llvm.x86.avx512.mask.or.pd.256" => "__builtin_ia32_orpd256_mask", + "llvm.x86.avx512.mask.or.pd.512" => "__builtin_ia32_orpd512_mask", + "llvm.x86.avx512.mask.or.ps.128" => "__builtin_ia32_orps128_mask", + "llvm.x86.avx512.mask.or.ps.256" => "__builtin_ia32_orps256_mask", + "llvm.x86.avx512.mask.or.ps.512" => "__builtin_ia32_orps512_mask", + "llvm.x86.avx512.mask.pabs.b.128" => "__builtin_ia32_pabsb128_mask", + "llvm.x86.avx512.mask.pabs.b.256" => "__builtin_ia32_pabsb256_mask", + "llvm.x86.avx512.mask.pabs.b.512" => "__builtin_ia32_pabsb512_mask", + "llvm.x86.avx512.mask.pabs.d.128" => "__builtin_ia32_pabsd128_mask", + "llvm.x86.avx512.mask.pabs.d.256" => "__builtin_ia32_pabsd256_mask", + "llvm.x86.avx512.mask.pabs.d.512" => "__builtin_ia32_pabsd512_mask", + "llvm.x86.avx512.mask.pabs.q.128" => "__builtin_ia32_pabsq128_mask", + "llvm.x86.avx512.mask.pabs.q.256" => "__builtin_ia32_pabsq256_mask", + "llvm.x86.avx512.mask.pabs.q.512" => "__builtin_ia32_pabsq512_mask", + "llvm.x86.avx512.mask.pabs.w.128" => "__builtin_ia32_pabsw128_mask", + "llvm.x86.avx512.mask.pabs.w.256" => "__builtin_ia32_pabsw256_mask", + "llvm.x86.avx512.mask.pabs.w.512" => "__builtin_ia32_pabsw512_mask", + "llvm.x86.avx512.mask.packssdw.128" => "__builtin_ia32_packssdw128_mask", + "llvm.x86.avx512.mask.packssdw.256" => "__builtin_ia32_packssdw256_mask", + "llvm.x86.avx512.mask.packssdw.512" => "__builtin_ia32_packssdw512_mask", + "llvm.x86.avx512.mask.packsswb.128" => "__builtin_ia32_packsswb128_mask", + "llvm.x86.avx512.mask.packsswb.256" => "__builtin_ia32_packsswb256_mask", + "llvm.x86.avx512.mask.packsswb.512" => "__builtin_ia32_packsswb512_mask", + "llvm.x86.avx512.mask.packusdw.128" => "__builtin_ia32_packusdw128_mask", + "llvm.x86.avx512.mask.packusdw.256" => "__builtin_ia32_packusdw256_mask", + "llvm.x86.avx512.mask.packusdw.512" => "__builtin_ia32_packusdw512_mask", + "llvm.x86.avx512.mask.packuswb.128" => "__builtin_ia32_packuswb128_mask", + "llvm.x86.avx512.mask.packuswb.256" => "__builtin_ia32_packuswb256_mask", + "llvm.x86.avx512.mask.packuswb.512" => "__builtin_ia32_packuswb512_mask", + "llvm.x86.avx512.mask.padd.b.128" => "__builtin_ia32_paddb128_mask", + "llvm.x86.avx512.mask.padd.b.256" => "__builtin_ia32_paddb256_mask", + "llvm.x86.avx512.mask.padd.b.512" => "__builtin_ia32_paddb512_mask", + "llvm.x86.avx512.mask.padd.d.128" => "__builtin_ia32_paddd128_mask", + "llvm.x86.avx512.mask.padd.d.256" => "__builtin_ia32_paddd256_mask", + "llvm.x86.avx512.mask.padd.d.512" => "__builtin_ia32_paddd512_mask", + "llvm.x86.avx512.mask.padd.q.128" => "__builtin_ia32_paddq128_mask", + "llvm.x86.avx512.mask.padd.q.256" => "__builtin_ia32_paddq256_mask", + "llvm.x86.avx512.mask.padd.q.512" => "__builtin_ia32_paddq512_mask", + "llvm.x86.avx512.mask.padd.w.128" => "__builtin_ia32_paddw128_mask", + "llvm.x86.avx512.mask.padd.w.256" => "__builtin_ia32_paddw256_mask", + "llvm.x86.avx512.mask.padd.w.512" => "__builtin_ia32_paddw512_mask", + "llvm.x86.avx512.mask.padds.b.128" => "__builtin_ia32_paddsb128_mask", + "llvm.x86.avx512.mask.padds.b.256" => "__builtin_ia32_paddsb256_mask", + "llvm.x86.avx512.mask.padds.b.512" => "__builtin_ia32_paddsb512_mask", + 
"llvm.x86.avx512.mask.padds.w.128" => "__builtin_ia32_paddsw128_mask", + "llvm.x86.avx512.mask.padds.w.256" => "__builtin_ia32_paddsw256_mask", + "llvm.x86.avx512.mask.padds.w.512" => "__builtin_ia32_paddsw512_mask", + "llvm.x86.avx512.mask.paddus.b.128" => "__builtin_ia32_paddusb128_mask", + "llvm.x86.avx512.mask.paddus.b.256" => "__builtin_ia32_paddusb256_mask", + "llvm.x86.avx512.mask.paddus.b.512" => "__builtin_ia32_paddusb512_mask", + "llvm.x86.avx512.mask.paddus.w.128" => "__builtin_ia32_paddusw128_mask", + "llvm.x86.avx512.mask.paddus.w.256" => "__builtin_ia32_paddusw256_mask", + "llvm.x86.avx512.mask.paddus.w.512" => "__builtin_ia32_paddusw512_mask", + "llvm.x86.avx512.mask.pand.d.512" => "__builtin_ia32_pandd512_mask", + "llvm.x86.avx512.mask.pand.q.512" => "__builtin_ia32_pandq512_mask", + "llvm.x86.avx512.mask.pavg.b.128" => "__builtin_ia32_pavgb128_mask", + "llvm.x86.avx512.mask.pavg.b.256" => "__builtin_ia32_pavgb256_mask", + "llvm.x86.avx512.mask.pavg.b.512" => "__builtin_ia32_pavgb512_mask", + "llvm.x86.avx512.mask.pavg.w.128" => "__builtin_ia32_pavgw128_mask", + "llvm.x86.avx512.mask.pavg.w.256" => "__builtin_ia32_pavgw256_mask", + "llvm.x86.avx512.mask.pavg.w.512" => "__builtin_ia32_pavgw512_mask", + "llvm.x86.avx512.mask.pbroadcast.b.gpr.128" => "__builtin_ia32_pbroadcastb128_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.b.gpr.256" => "__builtin_ia32_pbroadcastb256_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.b.gpr.512" => "__builtin_ia32_pbroadcastb512_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.d.gpr.128" => "__builtin_ia32_pbroadcastd128_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.d.gpr.256" => "__builtin_ia32_pbroadcastd256_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.d.gpr.512" => "__builtin_ia32_pbroadcastd512_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.q.gpr.128" => "__builtin_ia32_pbroadcastq128_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.q.gpr.256" => "__builtin_ia32_pbroadcastq256_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.q.gpr.512" => "__builtin_ia32_pbroadcastq512_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.q.mem.512" => "__builtin_ia32_pbroadcastq512_mem_mask", + "llvm.x86.avx512.mask.pbroadcast.w.gpr.128" => "__builtin_ia32_pbroadcastw128_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.w.gpr.256" => "__builtin_ia32_pbroadcastw256_gpr_mask", + "llvm.x86.avx512.mask.pbroadcast.w.gpr.512" => "__builtin_ia32_pbroadcastw512_gpr_mask", + "llvm.x86.avx512.mask.pcmpeq.b.128" => "__builtin_ia32_pcmpeqb128_mask", + "llvm.x86.avx512.mask.pcmpeq.b.256" => "__builtin_ia32_pcmpeqb256_mask", + "llvm.x86.avx512.mask.pcmpeq.b.512" => "__builtin_ia32_pcmpeqb512_mask", + "llvm.x86.avx512.mask.pcmpeq.d.128" => "__builtin_ia32_pcmpeqd128_mask", + "llvm.x86.avx512.mask.pcmpeq.d.256" => "__builtin_ia32_pcmpeqd256_mask", + "llvm.x86.avx512.mask.pcmpeq.d.512" => "__builtin_ia32_pcmpeqd512_mask", + "llvm.x86.avx512.mask.pcmpeq.q.128" => "__builtin_ia32_pcmpeqq128_mask", + "llvm.x86.avx512.mask.pcmpeq.q.256" => "__builtin_ia32_pcmpeqq256_mask", + "llvm.x86.avx512.mask.pcmpeq.q.512" => "__builtin_ia32_pcmpeqq512_mask", + "llvm.x86.avx512.mask.pcmpeq.w.128" => "__builtin_ia32_pcmpeqw128_mask", + "llvm.x86.avx512.mask.pcmpeq.w.256" => "__builtin_ia32_pcmpeqw256_mask", + "llvm.x86.avx512.mask.pcmpeq.w.512" => "__builtin_ia32_pcmpeqw512_mask", + "llvm.x86.avx512.mask.pcmpgt.b.128" => "__builtin_ia32_pcmpgtb128_mask", + "llvm.x86.avx512.mask.pcmpgt.b.256" => "__builtin_ia32_pcmpgtb256_mask", + "llvm.x86.avx512.mask.pcmpgt.b.512" => "__builtin_ia32_pcmpgtb512_mask", + 
"llvm.x86.avx512.mask.pcmpgt.d.128" => "__builtin_ia32_pcmpgtd128_mask", + "llvm.x86.avx512.mask.pcmpgt.d.256" => "__builtin_ia32_pcmpgtd256_mask", + "llvm.x86.avx512.mask.pcmpgt.d.512" => "__builtin_ia32_pcmpgtd512_mask", + "llvm.x86.avx512.mask.pcmpgt.q.128" => "__builtin_ia32_pcmpgtq128_mask", + "llvm.x86.avx512.mask.pcmpgt.q.256" => "__builtin_ia32_pcmpgtq256_mask", + "llvm.x86.avx512.mask.pcmpgt.q.512" => "__builtin_ia32_pcmpgtq512_mask", + "llvm.x86.avx512.mask.pcmpgt.w.128" => "__builtin_ia32_pcmpgtw128_mask", + "llvm.x86.avx512.mask.pcmpgt.w.256" => "__builtin_ia32_pcmpgtw256_mask", + "llvm.x86.avx512.mask.pcmpgt.w.512" => "__builtin_ia32_pcmpgtw512_mask", + "llvm.x86.avx512.mask.permvar.df.256" => "__builtin_ia32_permvardf256_mask", + "llvm.x86.avx512.mask.permvar.df.512" => "__builtin_ia32_permvardf512_mask", + "llvm.x86.avx512.mask.permvar.di.256" => "__builtin_ia32_permvardi256_mask", + "llvm.x86.avx512.mask.permvar.di.512" => "__builtin_ia32_permvardi512_mask", + "llvm.x86.avx512.mask.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask", + "llvm.x86.avx512.mask.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask", + "llvm.x86.avx512.mask.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask", + "llvm.x86.avx512.mask.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask", + "llvm.x86.avx512.mask.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask", + "llvm.x86.avx512.mask.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask", + "llvm.x86.avx512.mask.permvar.sf.256" => "__builtin_ia32_permvarsf256_mask", + "llvm.x86.avx512.mask.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask", + "llvm.x86.avx512.mask.permvar.si.256" => "__builtin_ia32_permvarsi256_mask", + "llvm.x86.avx512.mask.permvar.si.512" => "__builtin_ia32_permvarsi512_mask", + "llvm.x86.avx512.mask.pmaddubs.w.128" => "__builtin_ia32_pmaddubsw128_mask", + "llvm.x86.avx512.mask.pmaddubs.w.256" => "__builtin_ia32_pmaddubsw256_mask", + "llvm.x86.avx512.mask.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask", + "llvm.x86.avx512.mask.pmaddw.d.128" => "__builtin_ia32_pmaddwd128_mask", + "llvm.x86.avx512.mask.pmaddw.d.256" => "__builtin_ia32_pmaddwd256_mask", + "llvm.x86.avx512.mask.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask", + "llvm.x86.avx512.mask.pmaxs.b.128" => "__builtin_ia32_pmaxsb128_mask", + "llvm.x86.avx512.mask.pmaxs.b.256" => "__builtin_ia32_pmaxsb256_mask", + "llvm.x86.avx512.mask.pmaxs.b.512" => "__builtin_ia32_pmaxsb512_mask", + "llvm.x86.avx512.mask.pmaxs.d.128" => "__builtin_ia32_pmaxsd128_mask", + "llvm.x86.avx512.mask.pmaxs.d.256" => "__builtin_ia32_pmaxsd256_mask", + "llvm.x86.avx512.mask.pmaxs.d.512" => "__builtin_ia32_pmaxsd512_mask", + "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask", + "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask", + "llvm.x86.avx512.mask.pmaxs.q.512" => "__builtin_ia32_pmaxsq512_mask", + "llvm.x86.avx512.mask.pmaxs.w.128" => "__builtin_ia32_pmaxsw128_mask", + "llvm.x86.avx512.mask.pmaxs.w.256" => "__builtin_ia32_pmaxsw256_mask", + "llvm.x86.avx512.mask.pmaxs.w.512" => "__builtin_ia32_pmaxsw512_mask", + "llvm.x86.avx512.mask.pmaxu.b.128" => "__builtin_ia32_pmaxub128_mask", + "llvm.x86.avx512.mask.pmaxu.b.256" => "__builtin_ia32_pmaxub256_mask", + "llvm.x86.avx512.mask.pmaxu.b.512" => "__builtin_ia32_pmaxub512_mask", + "llvm.x86.avx512.mask.pmaxu.d.128" => "__builtin_ia32_pmaxud128_mask", + "llvm.x86.avx512.mask.pmaxu.d.256" => "__builtin_ia32_pmaxud256_mask", + "llvm.x86.avx512.mask.pmaxu.d.512" => "__builtin_ia32_pmaxud512_mask", 
+ "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask", + "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask", + "llvm.x86.avx512.mask.pmaxu.q.512" => "__builtin_ia32_pmaxuq512_mask", + "llvm.x86.avx512.mask.pmaxu.w.128" => "__builtin_ia32_pmaxuw128_mask", + "llvm.x86.avx512.mask.pmaxu.w.256" => "__builtin_ia32_pmaxuw256_mask", + "llvm.x86.avx512.mask.pmaxu.w.512" => "__builtin_ia32_pmaxuw512_mask", + "llvm.x86.avx512.mask.pmins.b.128" => "__builtin_ia32_pminsb128_mask", + "llvm.x86.avx512.mask.pmins.b.256" => "__builtin_ia32_pminsb256_mask", + "llvm.x86.avx512.mask.pmins.b.512" => "__builtin_ia32_pminsb512_mask", + "llvm.x86.avx512.mask.pmins.d.128" => "__builtin_ia32_pminsd128_mask", + "llvm.x86.avx512.mask.pmins.d.256" => "__builtin_ia32_pminsd256_mask", + "llvm.x86.avx512.mask.pmins.d.512" => "__builtin_ia32_pminsd512_mask", + "llvm.x86.avx512.mask.pmins.q.128" => "__builtin_ia32_pminsq128_mask", + "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask", + "llvm.x86.avx512.mask.pmins.q.512" => "__builtin_ia32_pminsq512_mask", + "llvm.x86.avx512.mask.pmins.w.128" => "__builtin_ia32_pminsw128_mask", + "llvm.x86.avx512.mask.pmins.w.256" => "__builtin_ia32_pminsw256_mask", + "llvm.x86.avx512.mask.pmins.w.512" => "__builtin_ia32_pminsw512_mask", + "llvm.x86.avx512.mask.pminu.b.128" => "__builtin_ia32_pminub128_mask", + "llvm.x86.avx512.mask.pminu.b.256" => "__builtin_ia32_pminub256_mask", + "llvm.x86.avx512.mask.pminu.b.512" => "__builtin_ia32_pminub512_mask", + "llvm.x86.avx512.mask.pminu.d.128" => "__builtin_ia32_pminud128_mask", + "llvm.x86.avx512.mask.pminu.d.256" => "__builtin_ia32_pminud256_mask", + "llvm.x86.avx512.mask.pminu.d.512" => "__builtin_ia32_pminud512_mask", + "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask", + "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask", + "llvm.x86.avx512.mask.pminu.q.512" => "__builtin_ia32_pminuq512_mask", + "llvm.x86.avx512.mask.pminu.w.128" => "__builtin_ia32_pminuw128_mask", + "llvm.x86.avx512.mask.pminu.w.256" => "__builtin_ia32_pminuw256_mask", + "llvm.x86.avx512.mask.pminu.w.512" => "__builtin_ia32_pminuw512_mask", + "llvm.x86.avx512.mask.pmov.db.128" => "__builtin_ia32_pmovdb128_mask", + "llvm.x86.avx512.mask.pmov.db.256" => "__builtin_ia32_pmovdb256_mask", + "llvm.x86.avx512.mask.pmov.db.512" => "__builtin_ia32_pmovdb512_mask", + "llvm.x86.avx512.mask.pmov.db.mem.128" => "__builtin_ia32_pmovdb128mem_mask", + "llvm.x86.avx512.mask.pmov.db.mem.256" => "__builtin_ia32_pmovdb256mem_mask", + "llvm.x86.avx512.mask.pmov.db.mem.512" => "__builtin_ia32_pmovdb512mem_mask", + "llvm.x86.avx512.mask.pmov.dw.128" => "__builtin_ia32_pmovdw128_mask", + "llvm.x86.avx512.mask.pmov.dw.256" => "__builtin_ia32_pmovdw256_mask", + "llvm.x86.avx512.mask.pmov.dw.512" => "__builtin_ia32_pmovdw512_mask", + "llvm.x86.avx512.mask.pmov.dw.mem.128" => "__builtin_ia32_pmovdw128mem_mask", + "llvm.x86.avx512.mask.pmov.dw.mem.256" => "__builtin_ia32_pmovdw256mem_mask", + "llvm.x86.avx512.mask.pmov.dw.mem.512" => "__builtin_ia32_pmovdw512mem_mask", + "llvm.x86.avx512.mask.pmov.qb.128" => "__builtin_ia32_pmovqb128_mask", + "llvm.x86.avx512.mask.pmov.qb.256" => "__builtin_ia32_pmovqb256_mask", + "llvm.x86.avx512.mask.pmov.qb.512" => "__builtin_ia32_pmovqb512_mask", + "llvm.x86.avx512.mask.pmov.qb.mem.128" => "__builtin_ia32_pmovqb128mem_mask", + "llvm.x86.avx512.mask.pmov.qb.mem.256" => "__builtin_ia32_pmovqb256mem_mask", + "llvm.x86.avx512.mask.pmov.qb.mem.512" => 
"__builtin_ia32_pmovqb512mem_mask", + "llvm.x86.avx512.mask.pmov.qd.128" => "__builtin_ia32_pmovqd128_mask", + "llvm.x86.avx512.mask.pmov.qd.256" => "__builtin_ia32_pmovqd256_mask", + "llvm.x86.avx512.mask.pmov.qd.512" => "__builtin_ia32_pmovqd512_mask", + "llvm.x86.avx512.mask.pmov.qd.mem.128" => "__builtin_ia32_pmovqd128mem_mask", + "llvm.x86.avx512.mask.pmov.qd.mem.256" => "__builtin_ia32_pmovqd256mem_mask", + "llvm.x86.avx512.mask.pmov.qd.mem.512" => "__builtin_ia32_pmovqd512mem_mask", + "llvm.x86.avx512.mask.pmov.qw.128" => "__builtin_ia32_pmovqw128_mask", + "llvm.x86.avx512.mask.pmov.qw.256" => "__builtin_ia32_pmovqw256_mask", + "llvm.x86.avx512.mask.pmov.qw.512" => "__builtin_ia32_pmovqw512_mask", + "llvm.x86.avx512.mask.pmov.qw.mem.128" => "__builtin_ia32_pmovqw128mem_mask", + "llvm.x86.avx512.mask.pmov.qw.mem.256" => "__builtin_ia32_pmovqw256mem_mask", + "llvm.x86.avx512.mask.pmov.qw.mem.512" => "__builtin_ia32_pmovqw512mem_mask", + "llvm.x86.avx512.mask.pmov.wb.128" => "__builtin_ia32_pmovwb128_mask", + "llvm.x86.avx512.mask.pmov.wb.256" => "__builtin_ia32_pmovwb256_mask", + "llvm.x86.avx512.mask.pmov.wb.512" => "__builtin_ia32_pmovwb512_mask", + "llvm.x86.avx512.mask.pmov.wb.mem.128" => "__builtin_ia32_pmovwb128mem_mask", + "llvm.x86.avx512.mask.pmov.wb.mem.256" => "__builtin_ia32_pmovwb256mem_mask", + "llvm.x86.avx512.mask.pmov.wb.mem.512" => "__builtin_ia32_pmovwb512mem_mask", + "llvm.x86.avx512.mask.pmovs.db.128" => "__builtin_ia32_pmovsdb128_mask", + "llvm.x86.avx512.mask.pmovs.db.256" => "__builtin_ia32_pmovsdb256_mask", + "llvm.x86.avx512.mask.pmovs.db.512" => "__builtin_ia32_pmovsdb512_mask", + "llvm.x86.avx512.mask.pmovs.db.mem.128" => "__builtin_ia32_pmovsdb128mem_mask", + "llvm.x86.avx512.mask.pmovs.db.mem.256" => "__builtin_ia32_pmovsdb256mem_mask", + "llvm.x86.avx512.mask.pmovs.db.mem.512" => "__builtin_ia32_pmovsdb512mem_mask", + "llvm.x86.avx512.mask.pmovs.dw.128" => "__builtin_ia32_pmovsdw128_mask", + "llvm.x86.avx512.mask.pmovs.dw.256" => "__builtin_ia32_pmovsdw256_mask", + "llvm.x86.avx512.mask.pmovs.dw.512" => "__builtin_ia32_pmovsdw512_mask", + "llvm.x86.avx512.mask.pmovs.dw.mem.128" => "__builtin_ia32_pmovsdw128mem_mask", + "llvm.x86.avx512.mask.pmovs.dw.mem.256" => "__builtin_ia32_pmovsdw256mem_mask", + "llvm.x86.avx512.mask.pmovs.dw.mem.512" => "__builtin_ia32_pmovsdw512mem_mask", + "llvm.x86.avx512.mask.pmovs.qb.128" => "__builtin_ia32_pmovsqb128_mask", + "llvm.x86.avx512.mask.pmovs.qb.256" => "__builtin_ia32_pmovsqb256_mask", + "llvm.x86.avx512.mask.pmovs.qb.512" => "__builtin_ia32_pmovsqb512_mask", + "llvm.x86.avx512.mask.pmovs.qb.mem.128" => "__builtin_ia32_pmovsqb128mem_mask", + "llvm.x86.avx512.mask.pmovs.qb.mem.256" => "__builtin_ia32_pmovsqb256mem_mask", + "llvm.x86.avx512.mask.pmovs.qb.mem.512" => "__builtin_ia32_pmovsqb512mem_mask", + "llvm.x86.avx512.mask.pmovs.qd.128" => "__builtin_ia32_pmovsqd128_mask", + "llvm.x86.avx512.mask.pmovs.qd.256" => "__builtin_ia32_pmovsqd256_mask", + "llvm.x86.avx512.mask.pmovs.qd.512" => "__builtin_ia32_pmovsqd512_mask", + "llvm.x86.avx512.mask.pmovs.qd.mem.128" => "__builtin_ia32_pmovsqd128mem_mask", + "llvm.x86.avx512.mask.pmovs.qd.mem.256" => "__builtin_ia32_pmovsqd256mem_mask", + "llvm.x86.avx512.mask.pmovs.qd.mem.512" => "__builtin_ia32_pmovsqd512mem_mask", + "llvm.x86.avx512.mask.pmovs.qw.128" => "__builtin_ia32_pmovsqw128_mask", + "llvm.x86.avx512.mask.pmovs.qw.256" => "__builtin_ia32_pmovsqw256_mask", + "llvm.x86.avx512.mask.pmovs.qw.512" => "__builtin_ia32_pmovsqw512_mask", + 
"llvm.x86.avx512.mask.pmovs.qw.mem.128" => "__builtin_ia32_pmovsqw128mem_mask", + "llvm.x86.avx512.mask.pmovs.qw.mem.256" => "__builtin_ia32_pmovsqw256mem_mask", + "llvm.x86.avx512.mask.pmovs.qw.mem.512" => "__builtin_ia32_pmovsqw512mem_mask", + "llvm.x86.avx512.mask.pmovs.wb.128" => "__builtin_ia32_pmovswb128_mask", + "llvm.x86.avx512.mask.pmovs.wb.256" => "__builtin_ia32_pmovswb256_mask", + "llvm.x86.avx512.mask.pmovs.wb.512" => "__builtin_ia32_pmovswb512_mask", + "llvm.x86.avx512.mask.pmovs.wb.mem.128" => "__builtin_ia32_pmovswb128mem_mask", + "llvm.x86.avx512.mask.pmovs.wb.mem.256" => "__builtin_ia32_pmovswb256mem_mask", + "llvm.x86.avx512.mask.pmovs.wb.mem.512" => "__builtin_ia32_pmovswb512mem_mask", + "llvm.x86.avx512.mask.pmovsxb.d.128" => "__builtin_ia32_pmovsxbd128_mask", + "llvm.x86.avx512.mask.pmovsxb.d.256" => "__builtin_ia32_pmovsxbd256_mask", + "llvm.x86.avx512.mask.pmovsxb.d.512" => "__builtin_ia32_pmovsxbd512_mask", + "llvm.x86.avx512.mask.pmovsxb.q.128" => "__builtin_ia32_pmovsxbq128_mask", + "llvm.x86.avx512.mask.pmovsxb.q.256" => "__builtin_ia32_pmovsxbq256_mask", + "llvm.x86.avx512.mask.pmovsxb.q.512" => "__builtin_ia32_pmovsxbq512_mask", + "llvm.x86.avx512.mask.pmovsxb.w.128" => "__builtin_ia32_pmovsxbw128_mask", + "llvm.x86.avx512.mask.pmovsxb.w.256" => "__builtin_ia32_pmovsxbw256_mask", + "llvm.x86.avx512.mask.pmovsxb.w.512" => "__builtin_ia32_pmovsxbw512_mask", + "llvm.x86.avx512.mask.pmovsxd.q.128" => "__builtin_ia32_pmovsxdq128_mask", + "llvm.x86.avx512.mask.pmovsxd.q.256" => "__builtin_ia32_pmovsxdq256_mask", + "llvm.x86.avx512.mask.pmovsxd.q.512" => "__builtin_ia32_pmovsxdq512_mask", + "llvm.x86.avx512.mask.pmovsxw.d.128" => "__builtin_ia32_pmovsxwd128_mask", + "llvm.x86.avx512.mask.pmovsxw.d.256" => "__builtin_ia32_pmovsxwd256_mask", + "llvm.x86.avx512.mask.pmovsxw.d.512" => "__builtin_ia32_pmovsxwd512_mask", + "llvm.x86.avx512.mask.pmovsxw.q.128" => "__builtin_ia32_pmovsxwq128_mask", + "llvm.x86.avx512.mask.pmovsxw.q.256" => "__builtin_ia32_pmovsxwq256_mask", + "llvm.x86.avx512.mask.pmovsxw.q.512" => "__builtin_ia32_pmovsxwq512_mask", + "llvm.x86.avx512.mask.pmovus.db.128" => "__builtin_ia32_pmovusdb128_mask", + "llvm.x86.avx512.mask.pmovus.db.256" => "__builtin_ia32_pmovusdb256_mask", + "llvm.x86.avx512.mask.pmovus.db.512" => "__builtin_ia32_pmovusdb512_mask", + "llvm.x86.avx512.mask.pmovus.db.mem.128" => "__builtin_ia32_pmovusdb128mem_mask", + "llvm.x86.avx512.mask.pmovus.db.mem.256" => "__builtin_ia32_pmovusdb256mem_mask", + "llvm.x86.avx512.mask.pmovus.db.mem.512" => "__builtin_ia32_pmovusdb512mem_mask", + "llvm.x86.avx512.mask.pmovus.dw.128" => "__builtin_ia32_pmovusdw128_mask", + "llvm.x86.avx512.mask.pmovus.dw.256" => "__builtin_ia32_pmovusdw256_mask", + "llvm.x86.avx512.mask.pmovus.dw.512" => "__builtin_ia32_pmovusdw512_mask", + "llvm.x86.avx512.mask.pmovus.dw.mem.128" => "__builtin_ia32_pmovusdw128mem_mask", + "llvm.x86.avx512.mask.pmovus.dw.mem.256" => "__builtin_ia32_pmovusdw256mem_mask", + "llvm.x86.avx512.mask.pmovus.dw.mem.512" => "__builtin_ia32_pmovusdw512mem_mask", + "llvm.x86.avx512.mask.pmovus.qb.128" => "__builtin_ia32_pmovusqb128_mask", + "llvm.x86.avx512.mask.pmovus.qb.256" => "__builtin_ia32_pmovusqb256_mask", + "llvm.x86.avx512.mask.pmovus.qb.512" => "__builtin_ia32_pmovusqb512_mask", + "llvm.x86.avx512.mask.pmovus.qb.mem.128" => "__builtin_ia32_pmovusqb128mem_mask", + "llvm.x86.avx512.mask.pmovus.qb.mem.256" => "__builtin_ia32_pmovusqb256mem_mask", + "llvm.x86.avx512.mask.pmovus.qb.mem.512" => "__builtin_ia32_pmovusqb512mem_mask", + 
"llvm.x86.avx512.mask.pmovus.qd.128" => "__builtin_ia32_pmovusqd128_mask", + "llvm.x86.avx512.mask.pmovus.qd.256" => "__builtin_ia32_pmovusqd256_mask", + "llvm.x86.avx512.mask.pmovus.qd.512" => "__builtin_ia32_pmovusqd512_mask", + "llvm.x86.avx512.mask.pmovus.qd.mem.128" => "__builtin_ia32_pmovusqd128mem_mask", + "llvm.x86.avx512.mask.pmovus.qd.mem.256" => "__builtin_ia32_pmovusqd256mem_mask", + "llvm.x86.avx512.mask.pmovus.qd.mem.512" => "__builtin_ia32_pmovusqd512mem_mask", + "llvm.x86.avx512.mask.pmovus.qw.128" => "__builtin_ia32_pmovusqw128_mask", + "llvm.x86.avx512.mask.pmovus.qw.256" => "__builtin_ia32_pmovusqw256_mask", + "llvm.x86.avx512.mask.pmovus.qw.512" => "__builtin_ia32_pmovusqw512_mask", + "llvm.x86.avx512.mask.pmovus.qw.mem.128" => "__builtin_ia32_pmovusqw128mem_mask", + "llvm.x86.avx512.mask.pmovus.qw.mem.256" => "__builtin_ia32_pmovusqw256mem_mask", + "llvm.x86.avx512.mask.pmovus.qw.mem.512" => "__builtin_ia32_pmovusqw512mem_mask", + "llvm.x86.avx512.mask.pmovus.wb.128" => "__builtin_ia32_pmovuswb128_mask", + "llvm.x86.avx512.mask.pmovus.wb.256" => "__builtin_ia32_pmovuswb256_mask", + "llvm.x86.avx512.mask.pmovus.wb.512" => "__builtin_ia32_pmovuswb512_mask", + "llvm.x86.avx512.mask.pmovus.wb.mem.128" => "__builtin_ia32_pmovuswb128mem_mask", + "llvm.x86.avx512.mask.pmovus.wb.mem.256" => "__builtin_ia32_pmovuswb256mem_mask", + "llvm.x86.avx512.mask.pmovus.wb.mem.512" => "__builtin_ia32_pmovuswb512mem_mask", + "llvm.x86.avx512.mask.pmovzxb.d.128" => "__builtin_ia32_pmovzxbd128_mask", + "llvm.x86.avx512.mask.pmovzxb.d.256" => "__builtin_ia32_pmovzxbd256_mask", + "llvm.x86.avx512.mask.pmovzxb.d.512" => "__builtin_ia32_pmovzxbd512_mask", + "llvm.x86.avx512.mask.pmovzxb.q.128" => "__builtin_ia32_pmovzxbq128_mask", + "llvm.x86.avx512.mask.pmovzxb.q.256" => "__builtin_ia32_pmovzxbq256_mask", + "llvm.x86.avx512.mask.pmovzxb.q.512" => "__builtin_ia32_pmovzxbq512_mask", + "llvm.x86.avx512.mask.pmovzxb.w.128" => "__builtin_ia32_pmovzxbw128_mask", + "llvm.x86.avx512.mask.pmovzxb.w.256" => "__builtin_ia32_pmovzxbw256_mask", + "llvm.x86.avx512.mask.pmovzxb.w.512" => "__builtin_ia32_pmovzxbw512_mask", + "llvm.x86.avx512.mask.pmovzxd.q.128" => "__builtin_ia32_pmovzxdq128_mask", + "llvm.x86.avx512.mask.pmovzxd.q.256" => "__builtin_ia32_pmovzxdq256_mask", + "llvm.x86.avx512.mask.pmovzxd.q.512" => "__builtin_ia32_pmovzxdq512_mask", + "llvm.x86.avx512.mask.pmovzxw.d.128" => "__builtin_ia32_pmovzxwd128_mask", + "llvm.x86.avx512.mask.pmovzxw.d.256" => "__builtin_ia32_pmovzxwd256_mask", + "llvm.x86.avx512.mask.pmovzxw.d.512" => "__builtin_ia32_pmovzxwd512_mask", + "llvm.x86.avx512.mask.pmovzxw.q.128" => "__builtin_ia32_pmovzxwq128_mask", + "llvm.x86.avx512.mask.pmovzxw.q.256" => "__builtin_ia32_pmovzxwq256_mask", + "llvm.x86.avx512.mask.pmovzxw.q.512" => "__builtin_ia32_pmovzxwq512_mask", + "llvm.x86.avx512.mask.pmul.dq.128" => "__builtin_ia32_pmuldq128_mask", + "llvm.x86.avx512.mask.pmul.dq.256" => "__builtin_ia32_pmuldq256_mask", + "llvm.x86.avx512.mask.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask", + "llvm.x86.avx512.mask.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128_mask", + "llvm.x86.avx512.mask.pmul.hr.sw.256" => "__builtin_ia32_pmulhrsw256_mask", + "llvm.x86.avx512.mask.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask", + "llvm.x86.avx512.mask.pmulh.w.128" => "__builtin_ia32_pmulhw128_mask", + "llvm.x86.avx512.mask.pmulh.w.256" => "__builtin_ia32_pmulhw256_mask", + "llvm.x86.avx512.mask.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask", + "llvm.x86.avx512.mask.pmulhu.w.128" => 
"__builtin_ia32_pmulhuw128_mask", + "llvm.x86.avx512.mask.pmulhu.w.256" => "__builtin_ia32_pmulhuw256_mask", + "llvm.x86.avx512.mask.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask", + "llvm.x86.avx512.mask.pmull.d.128" => "__builtin_ia32_pmulld128_mask", + "llvm.x86.avx512.mask.pmull.d.256" => "__builtin_ia32_pmulld256_mask", + "llvm.x86.avx512.mask.pmull.d.512" => "__builtin_ia32_pmulld512_mask", + "llvm.x86.avx512.mask.pmull.q.128" => "__builtin_ia32_pmullq128_mask", + "llvm.x86.avx512.mask.pmull.q.256" => "__builtin_ia32_pmullq256_mask", + "llvm.x86.avx512.mask.pmull.q.512" => "__builtin_ia32_pmullq512_mask", + "llvm.x86.avx512.mask.pmull.w.128" => "__builtin_ia32_pmullw128_mask", + "llvm.x86.avx512.mask.pmull.w.256" => "__builtin_ia32_pmullw256_mask", + "llvm.x86.avx512.mask.pmull.w.512" => "__builtin_ia32_pmullw512_mask", + "llvm.x86.avx512.mask.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask", + "llvm.x86.avx512.mask.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask", + "llvm.x86.avx512.mask.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask", + "llvm.x86.avx512.mask.pmulu.dq.128" => "__builtin_ia32_pmuludq128_mask", + "llvm.x86.avx512.mask.pmulu.dq.256" => "__builtin_ia32_pmuludq256_mask", + "llvm.x86.avx512.mask.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask", + "llvm.x86.avx512.mask.prol.d.128" => "__builtin_ia32_prold128_mask", + "llvm.x86.avx512.mask.prol.d.256" => "__builtin_ia32_prold256_mask", + "llvm.x86.avx512.mask.prol.d.512" => "__builtin_ia32_prold512_mask", + "llvm.x86.avx512.mask.prol.q.128" => "__builtin_ia32_prolq128_mask", + "llvm.x86.avx512.mask.prol.q.256" => "__builtin_ia32_prolq256_mask", + "llvm.x86.avx512.mask.prol.q.512" => "__builtin_ia32_prolq512_mask", + "llvm.x86.avx512.mask.prolv.d.128" => "__builtin_ia32_prolvd128_mask", + "llvm.x86.avx512.mask.prolv.d.256" => "__builtin_ia32_prolvd256_mask", + "llvm.x86.avx512.mask.prolv.d.512" => "__builtin_ia32_prolvd512_mask", + "llvm.x86.avx512.mask.prolv.q.128" => "__builtin_ia32_prolvq128_mask", + "llvm.x86.avx512.mask.prolv.q.256" => "__builtin_ia32_prolvq256_mask", + "llvm.x86.avx512.mask.prolv.q.512" => "__builtin_ia32_prolvq512_mask", + "llvm.x86.avx512.mask.pror.d.128" => "__builtin_ia32_prord128_mask", + "llvm.x86.avx512.mask.pror.d.256" => "__builtin_ia32_prord256_mask", + "llvm.x86.avx512.mask.pror.d.512" => "__builtin_ia32_prord512_mask", + "llvm.x86.avx512.mask.pror.q.128" => "__builtin_ia32_prorq128_mask", + "llvm.x86.avx512.mask.pror.q.256" => "__builtin_ia32_prorq256_mask", + "llvm.x86.avx512.mask.pror.q.512" => "__builtin_ia32_prorq512_mask", + "llvm.x86.avx512.mask.prorv.d.128" => "__builtin_ia32_prorvd128_mask", + "llvm.x86.avx512.mask.prorv.d.256" => "__builtin_ia32_prorvd256_mask", + "llvm.x86.avx512.mask.prorv.d.512" => "__builtin_ia32_prorvd512_mask", + "llvm.x86.avx512.mask.prorv.q.128" => "__builtin_ia32_prorvq128_mask", + "llvm.x86.avx512.mask.prorv.q.256" => "__builtin_ia32_prorvq256_mask", + "llvm.x86.avx512.mask.prorv.q.512" => "__builtin_ia32_prorvq512_mask", + "llvm.x86.avx512.mask.pshuf.b.128" => "__builtin_ia32_pshufb128_mask", + "llvm.x86.avx512.mask.pshuf.b.256" => "__builtin_ia32_pshufb256_mask", + "llvm.x86.avx512.mask.pshuf.b.512" => "__builtin_ia32_pshufb512_mask", + "llvm.x86.avx512.mask.psll.d" => "__builtin_ia32_pslld512_mask", + "llvm.x86.avx512.mask.psll.d.128" => "__builtin_ia32_pslld128_mask", + "llvm.x86.avx512.mask.psll.d.256" => "__builtin_ia32_pslld256_mask", + "llvm.x86.avx512.mask.psll.di.128" => 
"__builtin_ia32_pslldi128_mask", + "llvm.x86.avx512.mask.psll.di.256" => "__builtin_ia32_pslldi256_mask", + "llvm.x86.avx512.mask.psll.di.512" => "__builtin_ia32_pslldi512_mask", + "llvm.x86.avx512.mask.psll.q" => "__builtin_ia32_psllq512_mask", + "llvm.x86.avx512.mask.psll.q.128" => "__builtin_ia32_psllq128_mask", + "llvm.x86.avx512.mask.psll.q.256" => "__builtin_ia32_psllq256_mask", + "llvm.x86.avx512.mask.psll.qi.128" => "__builtin_ia32_psllqi128_mask", + "llvm.x86.avx512.mask.psll.qi.256" => "__builtin_ia32_psllqi256_mask", + "llvm.x86.avx512.mask.psll.qi.512" => "__builtin_ia32_psllqi512_mask", + "llvm.x86.avx512.mask.psll.w.128" => "__builtin_ia32_psllw128_mask", + "llvm.x86.avx512.mask.psll.w.256" => "__builtin_ia32_psllw256_mask", + "llvm.x86.avx512.mask.psll.w.512" => "__builtin_ia32_psllw512_mask", + "llvm.x86.avx512.mask.psll.wi.128" => "__builtin_ia32_psllwi128_mask", + "llvm.x86.avx512.mask.psll.wi.256" => "__builtin_ia32_psllwi256_mask", + "llvm.x86.avx512.mask.psll.wi.512" => "__builtin_ia32_psllwi512_mask", + "llvm.x86.avx512.mask.psllv.d" => "__builtin_ia32_psllv16si_mask", + "llvm.x86.avx512.mask.psllv.q" => "__builtin_ia32_psllv8di_mask", + "llvm.x86.avx512.mask.psllv16.hi" => "__builtin_ia32_psllv16hi_mask", + "llvm.x86.avx512.mask.psllv2.di" => "__builtin_ia32_psllv2di_mask", + "llvm.x86.avx512.mask.psllv32hi" => "__builtin_ia32_psllv32hi_mask", + "llvm.x86.avx512.mask.psllv4.di" => "__builtin_ia32_psllv4di_mask", + "llvm.x86.avx512.mask.psllv4.si" => "__builtin_ia32_psllv4si_mask", + "llvm.x86.avx512.mask.psllv8.hi" => "__builtin_ia32_psllv8hi_mask", + "llvm.x86.avx512.mask.psllv8.si" => "__builtin_ia32_psllv8si_mask", + "llvm.x86.avx512.mask.psra.d" => "__builtin_ia32_psrad512_mask", + "llvm.x86.avx512.mask.psra.d.128" => "__builtin_ia32_psrad128_mask", + "llvm.x86.avx512.mask.psra.d.256" => "__builtin_ia32_psrad256_mask", + "llvm.x86.avx512.mask.psra.di.128" => "__builtin_ia32_psradi128_mask", + "llvm.x86.avx512.mask.psra.di.256" => "__builtin_ia32_psradi256_mask", + "llvm.x86.avx512.mask.psra.di.512" => "__builtin_ia32_psradi512_mask", + "llvm.x86.avx512.mask.psra.q" => "__builtin_ia32_psraq512_mask", + "llvm.x86.avx512.mask.psra.q.128" => "__builtin_ia32_psraq128_mask", + "llvm.x86.avx512.mask.psra.q.256" => "__builtin_ia32_psraq256_mask", + "llvm.x86.avx512.mask.psra.qi.128" => "__builtin_ia32_psraqi128_mask", + "llvm.x86.avx512.mask.psra.qi.256" => "__builtin_ia32_psraqi256_mask", + "llvm.x86.avx512.mask.psra.qi.512" => "__builtin_ia32_psraqi512_mask", + "llvm.x86.avx512.mask.psra.w.128" => "__builtin_ia32_psraw128_mask", + "llvm.x86.avx512.mask.psra.w.256" => "__builtin_ia32_psraw256_mask", + "llvm.x86.avx512.mask.psra.w.512" => "__builtin_ia32_psraw512_mask", + "llvm.x86.avx512.mask.psra.wi.128" => "__builtin_ia32_psrawi128_mask", + "llvm.x86.avx512.mask.psra.wi.256" => "__builtin_ia32_psrawi256_mask", + "llvm.x86.avx512.mask.psra.wi.512" => "__builtin_ia32_psrawi512_mask", + "llvm.x86.avx512.mask.psrav.d" => "__builtin_ia32_psrav16si_mask", + "llvm.x86.avx512.mask.psrav.q" => "__builtin_ia32_psrav8di_mask", + "llvm.x86.avx512.mask.psrav.q.128" => "__builtin_ia32_psravq128_mask", + "llvm.x86.avx512.mask.psrav.q.256" => "__builtin_ia32_psravq256_mask", + "llvm.x86.avx512.mask.psrav16.hi" => "__builtin_ia32_psrav16hi_mask", + "llvm.x86.avx512.mask.psrav32.hi" => "__builtin_ia32_psrav32hi_mask", + "llvm.x86.avx512.mask.psrav4.si" => "__builtin_ia32_psrav4si_mask", + "llvm.x86.avx512.mask.psrav8.hi" => "__builtin_ia32_psrav8hi_mask", + 
"llvm.x86.avx512.mask.psrav8.si" => "__builtin_ia32_psrav8si_mask", + "llvm.x86.avx512.mask.psrl.d" => "__builtin_ia32_psrld512_mask", + "llvm.x86.avx512.mask.psrl.d.128" => "__builtin_ia32_psrld128_mask", + "llvm.x86.avx512.mask.psrl.d.256" => "__builtin_ia32_psrld256_mask", + "llvm.x86.avx512.mask.psrl.di.128" => "__builtin_ia32_psrldi128_mask", + "llvm.x86.avx512.mask.psrl.di.256" => "__builtin_ia32_psrldi256_mask", + "llvm.x86.avx512.mask.psrl.di.512" => "__builtin_ia32_psrldi512_mask", + "llvm.x86.avx512.mask.psrl.q" => "__builtin_ia32_psrlq512_mask", + "llvm.x86.avx512.mask.psrl.q.128" => "__builtin_ia32_psrlq128_mask", + "llvm.x86.avx512.mask.psrl.q.256" => "__builtin_ia32_psrlq256_mask", + "llvm.x86.avx512.mask.psrl.qi.128" => "__builtin_ia32_psrlqi128_mask", + "llvm.x86.avx512.mask.psrl.qi.256" => "__builtin_ia32_psrlqi256_mask", + "llvm.x86.avx512.mask.psrl.qi.512" => "__builtin_ia32_psrlqi512_mask", + "llvm.x86.avx512.mask.psrl.w.128" => "__builtin_ia32_psrlw128_mask", + "llvm.x86.avx512.mask.psrl.w.256" => "__builtin_ia32_psrlw256_mask", + "llvm.x86.avx512.mask.psrl.w.512" => "__builtin_ia32_psrlw512_mask", + "llvm.x86.avx512.mask.psrl.wi.128" => "__builtin_ia32_psrlwi128_mask", + "llvm.x86.avx512.mask.psrl.wi.256" => "__builtin_ia32_psrlwi256_mask", + "llvm.x86.avx512.mask.psrl.wi.512" => "__builtin_ia32_psrlwi512_mask", + "llvm.x86.avx512.mask.psrlv.d" => "__builtin_ia32_psrlv16si_mask", + "llvm.x86.avx512.mask.psrlv.q" => "__builtin_ia32_psrlv8di_mask", + "llvm.x86.avx512.mask.psrlv16.hi" => "__builtin_ia32_psrlv16hi_mask", + "llvm.x86.avx512.mask.psrlv2.di" => "__builtin_ia32_psrlv2di_mask", + "llvm.x86.avx512.mask.psrlv32hi" => "__builtin_ia32_psrlv32hi_mask", + "llvm.x86.avx512.mask.psrlv4.di" => "__builtin_ia32_psrlv4di_mask", + "llvm.x86.avx512.mask.psrlv4.si" => "__builtin_ia32_psrlv4si_mask", + "llvm.x86.avx512.mask.psrlv8.hi" => "__builtin_ia32_psrlv8hi_mask", + "llvm.x86.avx512.mask.psrlv8.si" => "__builtin_ia32_psrlv8si_mask", + "llvm.x86.avx512.mask.psub.b.128" => "__builtin_ia32_psubb128_mask", + "llvm.x86.avx512.mask.psub.b.256" => "__builtin_ia32_psubb256_mask", + "llvm.x86.avx512.mask.psub.b.512" => "__builtin_ia32_psubb512_mask", + "llvm.x86.avx512.mask.psub.d.128" => "__builtin_ia32_psubd128_mask", + "llvm.x86.avx512.mask.psub.d.256" => "__builtin_ia32_psubd256_mask", + "llvm.x86.avx512.mask.psub.d.512" => "__builtin_ia32_psubd512_mask", + "llvm.x86.avx512.mask.psub.q.128" => "__builtin_ia32_psubq128_mask", + "llvm.x86.avx512.mask.psub.q.256" => "__builtin_ia32_psubq256_mask", + "llvm.x86.avx512.mask.psub.q.512" => "__builtin_ia32_psubq512_mask", + "llvm.x86.avx512.mask.psub.w.128" => "__builtin_ia32_psubw128_mask", + "llvm.x86.avx512.mask.psub.w.256" => "__builtin_ia32_psubw256_mask", + "llvm.x86.avx512.mask.psub.w.512" => "__builtin_ia32_psubw512_mask", + "llvm.x86.avx512.mask.psubs.b.128" => "__builtin_ia32_psubsb128_mask", + "llvm.x86.avx512.mask.psubs.b.256" => "__builtin_ia32_psubsb256_mask", + "llvm.x86.avx512.mask.psubs.b.512" => "__builtin_ia32_psubsb512_mask", + "llvm.x86.avx512.mask.psubs.w.128" => "__builtin_ia32_psubsw128_mask", + "llvm.x86.avx512.mask.psubs.w.256" => "__builtin_ia32_psubsw256_mask", + "llvm.x86.avx512.mask.psubs.w.512" => "__builtin_ia32_psubsw512_mask", + "llvm.x86.avx512.mask.psubus.b.128" => "__builtin_ia32_psubusb128_mask", + "llvm.x86.avx512.mask.psubus.b.256" => "__builtin_ia32_psubusb256_mask", + "llvm.x86.avx512.mask.psubus.b.512" => "__builtin_ia32_psubusb512_mask", + "llvm.x86.avx512.mask.psubus.w.128" => 
"__builtin_ia32_psubusw128_mask", + "llvm.x86.avx512.mask.psubus.w.256" => "__builtin_ia32_psubusw256_mask", + "llvm.x86.avx512.mask.psubus.w.512" => "__builtin_ia32_psubusw512_mask", + "llvm.x86.avx512.mask.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask", + "llvm.x86.avx512.mask.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask", + "llvm.x86.avx512.mask.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask", + "llvm.x86.avx512.mask.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask", + "llvm.x86.avx512.mask.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask", + "llvm.x86.avx512.mask.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask", + "llvm.x86.avx512.mask.ptestm.d.512" => "__builtin_ia32_ptestmd512", + "llvm.x86.avx512.mask.ptestm.q.512" => "__builtin_ia32_ptestmq512", + "llvm.x86.avx512.mask.range.pd.128" => "__builtin_ia32_rangepd128_mask", + "llvm.x86.avx512.mask.range.pd.256" => "__builtin_ia32_rangepd256_mask", + "llvm.x86.avx512.mask.range.pd.512" => "__builtin_ia32_rangepd512_mask", + "llvm.x86.avx512.mask.range.ps.128" => "__builtin_ia32_rangeps128_mask", + "llvm.x86.avx512.mask.range.ps.256" => "__builtin_ia32_rangeps256_mask", + "llvm.x86.avx512.mask.range.ps.512" => "__builtin_ia32_rangeps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask", + "llvm.x86.avx512.mask.reduce.pd.128" => "__builtin_ia32_reducepd128_mask", + "llvm.x86.avx512.mask.reduce.pd.256" => "__builtin_ia32_reducepd256_mask", + "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask", + "llvm.x86.avx512.mask.reduce.ps.128" => "__builtin_ia32_reduceps128_mask", + "llvm.x86.avx512.mask.reduce.ps.256" => "__builtin_ia32_reduceps256_mask", + "llvm.x86.avx512.mask.reduce.ps.512" => "__builtin_ia32_reduceps512_mask", + "llvm.x86.avx512.mask.reduce.sd" => "__builtin_ia32_reducesd_mask", + "llvm.x86.avx512.mask.reduce.ss" => "__builtin_ia32_reducess_mask", + "llvm.x86.avx512.mask.rndscale.pd.128" => "__builtin_ia32_rndscalepd_128_mask", + "llvm.x86.avx512.mask.rndscale.pd.256" => "__builtin_ia32_rndscalepd_256_mask", + "llvm.x86.avx512.mask.rndscale.pd.512" => "__builtin_ia32_rndscalepd_mask", + "llvm.x86.avx512.mask.rndscale.ps.128" => "__builtin_ia32_rndscaleps_128_mask", + "llvm.x86.avx512.mask.rndscale.ps.256" => "__builtin_ia32_rndscaleps_256_mask", + "llvm.x86.avx512.mask.rndscale.ps.512" => "__builtin_ia32_rndscaleps_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask", + "llvm.x86.avx512.mask.scalef.pd.128" => "__builtin_ia32_scalefpd128_mask", + "llvm.x86.avx512.mask.scalef.pd.256" => "__builtin_ia32_scalefpd256_mask", + "llvm.x86.avx512.mask.scalef.pd.512" => "__builtin_ia32_scalefpd512_mask", + "llvm.x86.avx512.mask.scalef.ps.128" => "__builtin_ia32_scalefps128_mask", + "llvm.x86.avx512.mask.scalef.ps.256" => "__builtin_ia32_scalefps256_mask", + "llvm.x86.avx512.mask.scalef.ps.512" => "__builtin_ia32_scalefps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask", + "llvm.x86.avx512.mask.shuf.f32x4" => "__builtin_ia32_shuf_f32x4_mask", + "llvm.x86.avx512.mask.shuf.f32x4.256" => 
"__builtin_ia32_shuf_f32x4_256_mask", + "llvm.x86.avx512.mask.shuf.f64x2" => "__builtin_ia32_shuf_f64x2_mask", + "llvm.x86.avx512.mask.shuf.f64x2.256" => "__builtin_ia32_shuf_f64x2_256_mask", + "llvm.x86.avx512.mask.shuf.i32x4" => "__builtin_ia32_shuf_i32x4_mask", + "llvm.x86.avx512.mask.shuf.i32x4.256" => "__builtin_ia32_shuf_i32x4_256_mask", + "llvm.x86.avx512.mask.shuf.i64x2" => "__builtin_ia32_shuf_i64x2_mask", + "llvm.x86.avx512.mask.shuf.i64x2.256" => "__builtin_ia32_shuf_i64x2_256_mask", + "llvm.x86.avx512.mask.shuf.pd.128" => "__builtin_ia32_shufpd128_mask", + "llvm.x86.avx512.mask.shuf.pd.256" => "__builtin_ia32_shufpd256_mask", + "llvm.x86.avx512.mask.shuf.pd.512" => "__builtin_ia32_shufpd512_mask", + "llvm.x86.avx512.mask.shuf.ps.128" => "__builtin_ia32_shufps128_mask", + "llvm.x86.avx512.mask.shuf.ps.256" => "__builtin_ia32_shufps256_mask", + "llvm.x86.avx512.mask.shuf.ps.512" => "__builtin_ia32_shufps512_mask", + "llvm.x86.avx512.mask.sqrt.pd.128" => "__builtin_ia32_sqrtpd128_mask", + "llvm.x86.avx512.mask.sqrt.pd.256" => "__builtin_ia32_sqrtpd256_mask", + "llvm.x86.avx512.mask.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask", + "llvm.x86.avx512.mask.sqrt.ps.128" => "__builtin_ia32_sqrtps128_mask", + "llvm.x86.avx512.mask.sqrt.ps.256" => "__builtin_ia32_sqrtps256_mask", + "llvm.x86.avx512.mask.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask", + "llvm.x86.avx512.mask.store.ss" => "__builtin_ia32_storess_mask", + "llvm.x86.avx512.mask.storeu.d.512" => "__builtin_ia32_storedqusi512_mask", + "llvm.x86.avx512.mask.storeu.pd.512" => "__builtin_ia32_storeupd512_mask", + "llvm.x86.avx512.mask.storeu.ps.512" => "__builtin_ia32_storeups512_mask", + "llvm.x86.avx512.mask.storeu.q.512" => "__builtin_ia32_storedqudi512_mask", + "llvm.x86.avx512.mask.sub.pd.128" => "__builtin_ia32_subpd128_mask", + "llvm.x86.avx512.mask.sub.pd.256" => "__builtin_ia32_subpd256_mask", + "llvm.x86.avx512.mask.sub.pd.512" => "__builtin_ia32_subpd512_mask", + "llvm.x86.avx512.mask.sub.ps.128" => "__builtin_ia32_subps128_mask", + "llvm.x86.avx512.mask.sub.ps.256" => "__builtin_ia32_subps256_mask", + "llvm.x86.avx512.mask.sub.ps.512" => "__builtin_ia32_subps512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask", + "llvm.x86.avx512.mask.valign.d.128" => "__builtin_ia32_alignd128_mask", + "llvm.x86.avx512.mask.valign.d.256" => "__builtin_ia32_alignd256_mask", + "llvm.x86.avx512.mask.valign.d.512" => "__builtin_ia32_alignd512_mask", + "llvm.x86.avx512.mask.valign.q.128" => "__builtin_ia32_alignq128_mask", + "llvm.x86.avx512.mask.valign.q.256" => "__builtin_ia32_alignq256_mask", + "llvm.x86.avx512.mask.valign.q.512" => "__builtin_ia32_alignq512_mask", + "llvm.x86.avx512.mask.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps_mask", + "llvm.x86.avx512.mask.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256_mask", + "llvm.x86.avx512.mask.vcvtph2ps.512" => "__builtin_ia32_vcvtph2ps512_mask", + "llvm.x86.avx512.mask.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph_mask", + "llvm.x86.avx512.mask.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256_mask", + "llvm.x86.avx512.mask.vcvtps2ph.512" => "__builtin_ia32_vcvtps2ph512_mask", + "llvm.x86.avx512.mask.vextractf32x4.256" => 
"__builtin_ia32_extractf32x4_256_mask", + "llvm.x86.avx512.mask.vextractf32x4.512" => "__builtin_ia32_extractf32x4_mask", + "llvm.x86.avx512.mask.vextractf32x8.512" => "__builtin_ia32_extractf32x8_mask", + "llvm.x86.avx512.mask.vextractf64x2.256" => "__builtin_ia32_extractf64x2_256_mask", + "llvm.x86.avx512.mask.vextractf64x2.512" => "__builtin_ia32_extractf64x2_512_mask", + "llvm.x86.avx512.mask.vextractf64x4.512" => "__builtin_ia32_extractf64x4_mask", + "llvm.x86.avx512.mask.vextracti32x4.256" => "__builtin_ia32_extracti32x4_256_mask", + "llvm.x86.avx512.mask.vextracti32x4.512" => "__builtin_ia32_extracti32x4_mask", + "llvm.x86.avx512.mask.vextracti32x8.512" => "__builtin_ia32_extracti32x8_mask", + "llvm.x86.avx512.mask.vextracti64x2.256" => "__builtin_ia32_extracti64x2_256_mask", + "llvm.x86.avx512.mask.vextracti64x2.512" => "__builtin_ia32_extracti64x2_512_mask", + "llvm.x86.avx512.mask.vextracti64x4.512" => "__builtin_ia32_extracti64x4_mask", + "llvm.x86.avx512.mask.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask", + "llvm.x86.avx512.mask.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask", + "llvm.x86.avx512.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask", + "llvm.x86.avx512.mask.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask", + "llvm.x86.avx512.mask.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask", + "llvm.x86.avx512.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask", + "llvm.x86.avx512.mask.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask", + "llvm.x86.avx512.mask.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask", + "llvm.x86.avx512.mask.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask", + "llvm.x86.avx512.mask.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask", + "llvm.x86.avx512.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask", + "llvm.x86.avx512.mask.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask", + "llvm.x86.avx512.mask.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask", + "llvm.x86.avx512.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask", + "llvm.x86.avx512.mask.vfnmadd.pd.128" => "__builtin_ia32_vfnmaddpd128_mask", + "llvm.x86.avx512.mask.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256_mask", + "llvm.x86.avx512.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask", + "llvm.x86.avx512.mask.vfnmadd.ps.128" => "__builtin_ia32_vfnmaddps128_mask", + "llvm.x86.avx512.mask.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256_mask", + "llvm.x86.avx512.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask", + "llvm.x86.avx512.mask.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask", + "llvm.x86.avx512.mask.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask", + "llvm.x86.avx512.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask", + "llvm.x86.avx512.mask.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask", + "llvm.x86.avx512.mask.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask", + "llvm.x86.avx512.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask", + "llvm.x86.avx512.mask.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask", + "llvm.x86.avx512.mask.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask", + "llvm.x86.avx512.mask.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask", + "llvm.x86.avx512.mask.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128_mask", + "llvm.x86.avx512.mask.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256_mask", + "llvm.x86.avx512.mask.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512_mask", + 
"llvm.x86.avx512.mask.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask", + "llvm.x86.avx512.mask.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256_mask", + "llvm.x86.avx512.mask.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask", + "llvm.x86.avx512.mask.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask", + "llvm.x86.avx512.mask.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask", + "llvm.x86.avx512.mask.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask", + "llvm.x86.avx512.mask.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask", + "llvm.x86.avx512.mask.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask", + "llvm.x86.avx512.mask.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask", + "llvm.x86.avx512.mask.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128_mask", + "llvm.x86.avx512.mask.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256_mask", + "llvm.x86.avx512.mask.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512_mask", + "llvm.x86.avx512.mask.vpermilvar.pd.128" => "__builtin_ia32_vpermilvarpd_mask", + "llvm.x86.avx512.mask.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256_mask", + "llvm.x86.avx512.mask.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask", + "llvm.x86.avx512.mask.vpermilvar.ps.128" => "__builtin_ia32_vpermilvarps_mask", + "llvm.x86.avx512.mask.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256_mask", + "llvm.x86.avx512.mask.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask", + "llvm.x86.avx512.mask.vpermt.d.512" => "__builtin_ia32_vpermt2vard512_mask", + "llvm.x86.avx512.mask.vpermt.pd.512" => "__builtin_ia32_vpermt2varpd512_mask", + "llvm.x86.avx512.mask.vpermt.ps.512" => "__builtin_ia32_vpermt2varps512_mask", + "llvm.x86.avx512.mask.vpermt.q.512" => "__builtin_ia32_vpermt2varq512_mask", + "llvm.x86.avx512.mask.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_mask", + "llvm.x86.avx512.mask.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_mask", + "llvm.x86.avx512.mask.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_mask", + "llvm.x86.avx512.mask.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask", + "llvm.x86.avx512.mask.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask", + "llvm.x86.avx512.mask.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask", + "llvm.x86.avx512.mask.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_mask", + "llvm.x86.avx512.mask.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_mask", + "llvm.x86.avx512.mask.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_mask", + "llvm.x86.avx512.mask.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_mask", + "llvm.x86.avx512.mask.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_mask", + "llvm.x86.avx512.mask.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_mask", + "llvm.x86.avx512.mask.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_mask", + "llvm.x86.avx512.mask.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_mask", + "llvm.x86.avx512.mask.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_mask", + "llvm.x86.avx512.mask.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask", + "llvm.x86.avx512.mask.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask", + "llvm.x86.avx512.mask.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask", + "llvm.x86.avx512.mask.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask", + "llvm.x86.avx512.mask.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask", + 
"llvm.x86.avx512.mask.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask", + "llvm.x86.avx512.mask.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_mask", + "llvm.x86.avx512.mask.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask", + "llvm.x86.avx512.mask.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask", + "llvm.x86.avx512.mask.xor.pd.128" => "__builtin_ia32_xorpd128_mask", + "llvm.x86.avx512.mask.xor.pd.256" => "__builtin_ia32_xorpd256_mask", + "llvm.x86.avx512.mask.xor.pd.512" => "__builtin_ia32_xorpd512_mask", + "llvm.x86.avx512.mask.xor.ps.128" => "__builtin_ia32_xorps128_mask", + "llvm.x86.avx512.mask.xor.ps.256" => "__builtin_ia32_xorps256_mask", + "llvm.x86.avx512.mask.xor.ps.512" => "__builtin_ia32_xorps512_mask", + "llvm.x86.avx512.mask3.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask3", + "llvm.x86.avx512.mask3.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask3", + "llvm.x86.avx512.mask3.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask3", + "llvm.x86.avx512.mask3.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask3", + "llvm.x86.avx512.mask3.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask3", + "llvm.x86.avx512.mask3.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask3", + "llvm.x86.avx512.mask3.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask3", + "llvm.x86.avx512.mask3.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask3", + "llvm.x86.avx512.mask3.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask3", + "llvm.x86.avx512.mask3.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask3", + "llvm.x86.avx512.mask3.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask3", + "llvm.x86.avx512.mask3.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask3", + "llvm.x86.avx512.mask3.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask3", + "llvm.x86.avx512.mask3.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask3", + "llvm.x86.avx512.mask3.vfmsub.pd.128" => "__builtin_ia32_vfmsubpd128_mask3", + "llvm.x86.avx512.mask3.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256_mask3", + "llvm.x86.avx512.mask3.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask3", + "llvm.x86.avx512.mask3.vfmsub.ps.128" => "__builtin_ia32_vfmsubps128_mask3", + "llvm.x86.avx512.mask3.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256_mask3", + "llvm.x86.avx512.mask3.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask3", + "llvm.x86.avx512.mask3.vfmsubadd.pd.128" => "__builtin_ia32_vfmsubaddpd128_mask3", + "llvm.x86.avx512.mask3.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256_mask3", + "llvm.x86.avx512.mask3.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask3", + "llvm.x86.avx512.mask3.vfmsubadd.ps.128" => "__builtin_ia32_vfmsubaddps128_mask3", + "llvm.x86.avx512.mask3.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256_mask3", + "llvm.x86.avx512.mask3.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask3", + "llvm.x86.avx512.mask3.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask3", + "llvm.x86.avx512.mask3.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask3", + "llvm.x86.avx512.mask3.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask3", + "llvm.x86.avx512.mask3.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask3", + "llvm.x86.avx512.mask3.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask3", + "llvm.x86.avx512.mask3.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask3", + "llvm.x86.avx512.maskz.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_maskz", + "llvm.x86.avx512.maskz.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_maskz", + 
"llvm.x86.avx512.maskz.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_maskz", + "llvm.x86.avx512.maskz.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_maskz", + "llvm.x86.avx512.maskz.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_maskz", + "llvm.x86.avx512.maskz.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_maskz", + "llvm.x86.avx512.maskz.fixupimm.sd" => "__builtin_ia32_fixupimmsd_maskz", + "llvm.x86.avx512.maskz.fixupimm.ss" => "__builtin_ia32_fixupimmss_maskz", + "llvm.x86.avx512.maskz.pternlog.d.128" => "__builtin_ia32_pternlogd128_maskz", + "llvm.x86.avx512.maskz.pternlog.d.256" => "__builtin_ia32_pternlogd256_maskz", + "llvm.x86.avx512.maskz.pternlog.d.512" => "__builtin_ia32_pternlogd512_maskz", + "llvm.x86.avx512.maskz.pternlog.q.128" => "__builtin_ia32_pternlogq128_maskz", + "llvm.x86.avx512.maskz.pternlog.q.256" => "__builtin_ia32_pternlogq256_maskz", + "llvm.x86.avx512.maskz.pternlog.q.512" => "__builtin_ia32_pternlogq512_maskz", + "llvm.x86.avx512.maskz.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_maskz", + "llvm.x86.avx512.maskz.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_maskz", + "llvm.x86.avx512.maskz.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_maskz", + "llvm.x86.avx512.maskz.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_maskz", + "llvm.x86.avx512.maskz.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_maskz", + "llvm.x86.avx512.maskz.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_maskz", + "llvm.x86.avx512.maskz.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_maskz", + "llvm.x86.avx512.maskz.vfmadd.ss" => "__builtin_ia32_vfmaddss3_maskz", + "llvm.x86.avx512.maskz.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_maskz", + "llvm.x86.avx512.maskz.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_maskz", + "llvm.x86.avx512.maskz.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_maskz", + "llvm.x86.avx512.maskz.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_maskz", + "llvm.x86.avx512.maskz.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_maskz", + "llvm.x86.avx512.maskz.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_maskz", + "llvm.x86.avx512.maskz.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_maskz", + "llvm.x86.avx512.maskz.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_maskz", + "llvm.x86.avx512.maskz.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_maskz", + "llvm.x86.avx512.maskz.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_maskz", + "llvm.x86.avx512.maskz.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_maskz", + "llvm.x86.avx512.maskz.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_maskz", + "llvm.x86.avx512.maskz.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_maskz", + "llvm.x86.avx512.maskz.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_maskz", + "llvm.x86.avx512.maskz.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_maskz", + "llvm.x86.avx512.maskz.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_maskz", + "llvm.x86.avx512.maskz.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_maskz", + "llvm.x86.avx512.maskz.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_maskz", + "llvm.x86.avx512.maskz.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_maskz", + "llvm.x86.avx512.maskz.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_maskz", + "llvm.x86.avx512.maskz.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_maskz", + "llvm.x86.avx512.maskz.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_maskz", + "llvm.x86.avx512.maskz.vpermt2var.qi.256" => 
"__builtin_ia32_vpermt2varqi256_maskz", + "llvm.x86.avx512.maskz.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_maskz", + "llvm.x86.avx512.maskz.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_maskz", + "llvm.x86.avx512.maskz.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_maskz", + "llvm.x86.avx512.maskz.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_maskz", + "llvm.x86.avx512.maskz.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_maskz", + "llvm.x86.avx512.maskz.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_maskz", + "llvm.x86.avx512.maskz.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_maskz", + "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512", + "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512", + "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512", + "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512", + "llvm.x86.avx512.movntdqa" => "__builtin_ia32_movntdqa512", + "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512", + "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512", + "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512", + "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512", + "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512", + "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512", + "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512", + "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512", + "llvm.x86.avx512.pbroadcastd.512" => "__builtin_ia32_pbroadcastd512", + "llvm.x86.avx512.pbroadcastq.512" => "__builtin_ia32_pbroadcastq512", + "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256", + "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512", + "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256", + "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512", + "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128", + "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256", + "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512", + "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128", + "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256", + "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512", + "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512", + "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512", + "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512", + "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512", + "llvm.x86.avx512.pmovzxbd" => "__builtin_ia32_pmovzxbd512", + "llvm.x86.avx512.pmovzxbq" => "__builtin_ia32_pmovzxbq512", + "llvm.x86.avx512.pmovzxdq" => "__builtin_ia32_pmovzxdq512", + "llvm.x86.avx512.pmovzxwd" => "__builtin_ia32_pmovzxwd512", + "llvm.x86.avx512.pmovzxwq" => "__builtin_ia32_pmovzxwq512", + "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512", + "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512", + "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512", + "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128", + "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256", + "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512", + "llvm.x86.avx512.psad.bw.512" => "__builtin_ia32_psadbw512", + "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512", + "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512", + "llvm.x86.avx512.psll.dq" => "__builtin_ia32_pslldqi512", + 
"llvm.x86.avx512.psll.dq.bs" => "__builtin_ia32_pslldqi512_byteshift", + "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512", + "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512", + "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512", + "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512", + "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512", + "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si", + "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di", + "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi", + "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi", + "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi", + "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512", + "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128", + "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256", + "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512", + "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512", + "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512", + "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128", + "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256", + "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512", + "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512", + "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si", + "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128", + "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256", + "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di", + "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi", + "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi", + "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi", + "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512", + "llvm.x86.avx512.psrl.dq" => "__builtin_ia32_psrldqi512", + "llvm.x86.avx512.psrl.dq.bs" => "__builtin_ia32_psrldqi512_byteshift", + "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512", + "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512", + "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512", + "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512", + "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512", + "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si", + "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di", + "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi", + "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi", + "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi", + "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128", + "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256", + "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512", + "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128", + "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256", + "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512", + "llvm.x86.avx512.ptestm.b.128" => "__builtin_ia32_ptestmb128", + "llvm.x86.avx512.ptestm.b.256" => "__builtin_ia32_ptestmb256", + "llvm.x86.avx512.ptestm.b.512" => "__builtin_ia32_ptestmb512", + "llvm.x86.avx512.ptestm.d.128" => "__builtin_ia32_ptestmd128", + "llvm.x86.avx512.ptestm.d.256" => "__builtin_ia32_ptestmd256", + "llvm.x86.avx512.ptestm.d.512" => "__builtin_ia32_ptestmd512", + "llvm.x86.avx512.ptestm.q.128" => "__builtin_ia32_ptestmq128", + "llvm.x86.avx512.ptestm.q.256" => "__builtin_ia32_ptestmq256", + "llvm.x86.avx512.ptestm.q.512" => 
"__builtin_ia32_ptestmq512", + "llvm.x86.avx512.ptestm.w.128" => "__builtin_ia32_ptestmw128", + "llvm.x86.avx512.ptestm.w.256" => "__builtin_ia32_ptestmw256", + "llvm.x86.avx512.ptestm.w.512" => "__builtin_ia32_ptestmw512", + "llvm.x86.avx512.ptestnm.b.128" => "__builtin_ia32_ptestnmb128", + "llvm.x86.avx512.ptestnm.b.256" => "__builtin_ia32_ptestnmb256", + "llvm.x86.avx512.ptestnm.b.512" => "__builtin_ia32_ptestnmb512", + "llvm.x86.avx512.ptestnm.d.128" => "__builtin_ia32_ptestnmd128", + "llvm.x86.avx512.ptestnm.d.256" => "__builtin_ia32_ptestnmd256", + "llvm.x86.avx512.ptestnm.d.512" => "__builtin_ia32_ptestnmd512", + "llvm.x86.avx512.ptestnm.q.128" => "__builtin_ia32_ptestnmq128", + "llvm.x86.avx512.ptestnm.q.256" => "__builtin_ia32_ptestnmq256", + "llvm.x86.avx512.ptestnm.q.512" => "__builtin_ia32_ptestnmq512", + "llvm.x86.avx512.ptestnm.w.128" => "__builtin_ia32_ptestnmw128", + "llvm.x86.avx512.ptestnm.w.256" => "__builtin_ia32_ptestnmw256", + "llvm.x86.avx512.ptestnm.w.512" => "__builtin_ia32_ptestnmw512", + "llvm.x86.avx512.rcp14.pd.128" => "__builtin_ia32_rcp14pd128_mask", + "llvm.x86.avx512.rcp14.pd.256" => "__builtin_ia32_rcp14pd256_mask", + "llvm.x86.avx512.rcp14.pd.512" => "__builtin_ia32_rcp14pd512_mask", + "llvm.x86.avx512.rcp14.ps.128" => "__builtin_ia32_rcp14ps128_mask", + "llvm.x86.avx512.rcp14.ps.256" => "__builtin_ia32_rcp14ps256_mask", + "llvm.x86.avx512.rcp14.ps.512" => "__builtin_ia32_rcp14ps512_mask", + "llvm.x86.avx512.rcp14.sd" => "__builtin_ia32_rcp14sd_mask", + "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask", + "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask", + "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", + // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", + // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask", + "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd", + "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless", + "llvm.x86.avx512.rsqrt14.pd.128" => "__builtin_ia32_rsqrt14pd128_mask", + "llvm.x86.avx512.rsqrt14.pd.256" => "__builtin_ia32_rsqrt14pd256_mask", + "llvm.x86.avx512.rsqrt14.pd.512" => "__builtin_ia32_rsqrt14pd512_mask", + "llvm.x86.avx512.rsqrt14.ps.128" => "__builtin_ia32_rsqrt14ps128_mask", + "llvm.x86.avx512.rsqrt14.ps.256" => "__builtin_ia32_rsqrt14ps256_mask", + "llvm.x86.avx512.rsqrt14.ps.512" => "__builtin_ia32_rsqrt14ps512_mask", + "llvm.x86.avx512.rsqrt14.sd" => "__builtin_ia32_rsqrt14sd_mask", + "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask", + "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask", + "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", + // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", + // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask", + "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df", + "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si", + "llvm.x86.avx512.scatter.dpq.512" => "__builtin_ia32_scattersiv8di", + "llvm.x86.avx512.scatter.dps.512" => "__builtin_ia32_scattersiv16sf", + 
"llvm.x86.avx512.scatter.qpd.512" => "__builtin_ia32_scatterdiv8df", + "llvm.x86.avx512.scatter.qpi.512" => "__builtin_ia32_scatterdiv16si", + "llvm.x86.avx512.scatter.qpq.512" => "__builtin_ia32_scatterdiv8di", + "llvm.x86.avx512.scatter.qps.512" => "__builtin_ia32_scatterdiv16sf", + "llvm.x86.avx512.scatterdiv2.df" => "__builtin_ia32_scatterdiv2df", + "llvm.x86.avx512.scatterdiv2.di" => "__builtin_ia32_scatterdiv2di", + "llvm.x86.avx512.scatterdiv4.df" => "__builtin_ia32_scatterdiv4df", + "llvm.x86.avx512.scatterdiv4.di" => "__builtin_ia32_scatterdiv4di", + "llvm.x86.avx512.scatterdiv4.sf" => "__builtin_ia32_scatterdiv4sf", + "llvm.x86.avx512.scatterdiv4.si" => "__builtin_ia32_scatterdiv4si", + "llvm.x86.avx512.scatterdiv8.sf" => "__builtin_ia32_scatterdiv8sf", + "llvm.x86.avx512.scatterdiv8.si" => "__builtin_ia32_scatterdiv8si", + "llvm.x86.avx512.scatterpf.dpd.512" => "__builtin_ia32_scatterpfdpd", + "llvm.x86.avx512.scatterpf.dps.512" => "__builtin_ia32_scatterpfdps", + "llvm.x86.avx512.scatterpf.qpd.512" => "__builtin_ia32_scatterpfqpd", + "llvm.x86.avx512.scatterpf.qps.512" => "__builtin_ia32_scatterpfqps", + "llvm.x86.avx512.scattersiv2.df" => "__builtin_ia32_scattersiv2df", + "llvm.x86.avx512.scattersiv2.di" => "__builtin_ia32_scattersiv2di", + "llvm.x86.avx512.scattersiv4.df" => "__builtin_ia32_scattersiv4df", + "llvm.x86.avx512.scattersiv4.di" => "__builtin_ia32_scattersiv4di", + "llvm.x86.avx512.scattersiv4.sf" => "__builtin_ia32_scattersiv4sf", + "llvm.x86.avx512.scattersiv4.si" => "__builtin_ia32_scattersiv4si", + "llvm.x86.avx512.scattersiv8.sf" => "__builtin_ia32_scattersiv8sf", + "llvm.x86.avx512.scattersiv8.si" => "__builtin_ia32_scattersiv8si", + "llvm.x86.avx512.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask", + "llvm.x86.avx512.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask", + "llvm.x86.avx512.sqrt.sd" => "__builtin_ia32_sqrtrndsd", + "llvm.x86.avx512.sqrt.ss" => "__builtin_ia32_sqrtrndss", + "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512", + "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512", + "llvm.x86.avx512.vbroadcast.sd.512" => "__builtin_ia32_vbroadcastsd512", + "llvm.x86.avx512.vbroadcast.sd.pd.512" => "__builtin_ia32_vbroadcastsd_pd512", + "llvm.x86.avx512.vbroadcast.ss.512" => "__builtin_ia32_vbroadcastss512", + "llvm.x86.avx512.vbroadcast.ss.ps.512" => "__builtin_ia32_vbroadcastss_ps512", + "llvm.x86.avx512.vcomi.sd" => "__builtin_ia32_vcomisd", + "llvm.x86.avx512.vcomi.ss" => "__builtin_ia32_vcomiss", + "llvm.x86.avx512.vcvtsd2si32" => "__builtin_ia32_vcvtsd2si32", + "llvm.x86.avx512.vcvtsd2si64" => "__builtin_ia32_vcvtsd2si64", + "llvm.x86.avx512.vcvtsd2usi32" => "__builtin_ia32_vcvtsd2usi32", + "llvm.x86.avx512.vcvtsd2usi64" => "__builtin_ia32_vcvtsd2usi64", + "llvm.x86.avx512.vcvtss2si32" => "__builtin_ia32_vcvtss2si32", + "llvm.x86.avx512.vcvtss2si64" => "__builtin_ia32_vcvtss2si64", + "llvm.x86.avx512.vcvtss2usi32" => "__builtin_ia32_vcvtss2usi32", + "llvm.x86.avx512.vcvtss2usi64" => "__builtin_ia32_vcvtss2usi64", + "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd128", + "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd256", + "llvm.x86.avx512.vpdpbusd.512" => "__builtin_ia32_vpdpbusd512", + "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds128", + "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds256", + "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds512", + "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd128", + "llvm.x86.avx512.vpdpwssd.256" => 
"__builtin_ia32_vpdpwssd256", + "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd512", + "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds128", + "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds256", + "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds512", + "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128", + "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256", + "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512", + "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128", + "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256", + "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512", + "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128", + "llvm.x86.avx512.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256", + "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512", + "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128", + "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256", + "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512", + "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128", + "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256", + "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512", + "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128", + "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256", + "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512", + "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512", + "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512", + "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128", + "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256", + "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512", + "llvm.x86.avx512.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128", + "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256", + "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512", + "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_128", + "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_256", + "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_512", + "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_256", + "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_512", + "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_128", + "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_256", + "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_512", + "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512", + "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask", + "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask", + "llvm.x86.avx512fp16.mask.fpclass.sh" => "__builtin_ia32_fpclasssh_mask", + "llvm.x86.avx512fp16.mask.getexp.ph.128" => "__builtin_ia32_getexpph128_mask", + "llvm.x86.avx512fp16.mask.getexp.ph.256" => "__builtin_ia32_getexpph256_mask", + "llvm.x86.avx512fp16.mask.getexp.ph.512" => 
"__builtin_ia32_getexpph512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask", + "llvm.x86.avx512fp16.mask.getmant.ph.128" => "__builtin_ia32_getmantph128_mask", + "llvm.x86.avx512fp16.mask.getmant.ph.256" => "__builtin_ia32_getmantph256_mask", + "llvm.x86.avx512fp16.mask.getmant.ph.512" => "__builtin_ia32_getmantph512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask", + "llvm.x86.avx512fp16.mask.rcp.ph.128" => "__builtin_ia32_rcpph128_mask", + "llvm.x86.avx512fp16.mask.rcp.ph.256" => "__builtin_ia32_rcpph256_mask", + "llvm.x86.avx512fp16.mask.rcp.ph.512" => "__builtin_ia32_rcpph512_mask", + "llvm.x86.avx512fp16.mask.rcp.sh" => "__builtin_ia32_rcpsh_mask", + "llvm.x86.avx512fp16.mask.reduce.ph.128" => "__builtin_ia32_reduceph128_mask", + "llvm.x86.avx512fp16.mask.reduce.ph.256" => "__builtin_ia32_reduceph256_mask", + "llvm.x86.avx512fp16.mask.reduce.ph.512" => "__builtin_ia32_reduceph512_mask", + "llvm.x86.avx512fp16.mask.reduce.sh" => "__builtin_ia32_reducesh_mask", + "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph_128_mask", + "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph_256_mask", + "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask", + "llvm.x86.avx512fp16.mask.rsqrt.ph.128" => "__builtin_ia32_rsqrtph128_mask", + "llvm.x86.avx512fp16.mask.rsqrt.ph.256" => "__builtin_ia32_rsqrtph256_mask", + "llvm.x86.avx512fp16.mask.rsqrt.ph.512" => "__builtin_ia32_rsqrtph512_mask", + "llvm.x86.avx512fp16.mask.rsqrt.sh" => "__builtin_ia32_rsqrtsh_mask", + "llvm.x86.avx512fp16.mask.scalef.ph.128" => "__builtin_ia32_scalefph128_mask", + "llvm.x86.avx512fp16.mask.scalef.ph.256" => "__builtin_ia32_scalefph256_mask", + "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask", + "llvm.x86.avx512fp16.mask.vcvtdq2ph.128" => "__builtin_ia32_vcvtdq2ph128_mask", + "llvm.x86.avx512fp16.mask.vcvtpd2ph.128" => "__builtin_ia32_vcvtpd2ph128_mask", + "llvm.x86.avx512fp16.mask.vcvtpd2ph.256" => "__builtin_ia32_vcvtpd2ph256_mask", + "llvm.x86.avx512fp16.mask.vcvtpd2ph.512" => "__builtin_ia32_vcvtpd2ph512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2dq.128" => "__builtin_ia32_vcvtph2dq128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2dq.256" => "__builtin_ia32_vcvtph2dq256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2dq.512" => "__builtin_ia32_vcvtph2dq512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2pd.128" => "__builtin_ia32_vcvtph2pd128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2pd.256" => "__builtin_ia32_vcvtph2pd256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2pd.512" => "__builtin_ia32_vcvtph2pd512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2psx.128" => "__builtin_ia32_vcvtph2psx128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2psx.256" => "__builtin_ia32_vcvtph2psx256_mask", + 
"llvm.x86.avx512fp16.mask.vcvtph2psx.512" => "__builtin_ia32_vcvtph2psx512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2qq.128" => "__builtin_ia32_vcvtph2qq128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2qq.256" => "__builtin_ia32_vcvtph2qq256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2qq.512" => "__builtin_ia32_vcvtph2qq512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2udq.128" => "__builtin_ia32_vcvtph2udq128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2udq.256" => "__builtin_ia32_vcvtph2udq256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2udq.512" => "__builtin_ia32_vcvtph2udq512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2uqq.128" => "__builtin_ia32_vcvtph2uqq128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2uqq.256" => "__builtin_ia32_vcvtph2uqq256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2uqq.512" => "__builtin_ia32_vcvtph2uqq512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2uw.128" => "__builtin_ia32_vcvtph2uw128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2uw.256" => "__builtin_ia32_vcvtph2uw256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2uw.512" => "__builtin_ia32_vcvtph2uw512_mask", + "llvm.x86.avx512fp16.mask.vcvtph2w.128" => "__builtin_ia32_vcvtph2w128_mask", + "llvm.x86.avx512fp16.mask.vcvtph2w.256" => "__builtin_ia32_vcvtph2w256_mask", + "llvm.x86.avx512fp16.mask.vcvtph2w.512" => "__builtin_ia32_vcvtph2w512_mask", + "llvm.x86.avx512fp16.mask.vcvtps2phx.128" => "__builtin_ia32_vcvtps2phx128_mask", + "llvm.x86.avx512fp16.mask.vcvtps2phx.256" => "__builtin_ia32_vcvtps2phx256_mask", + "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask", + "llvm.x86.avx512fp16.mask.vcvtqq2ph.128" => "__builtin_ia32_vcvtqq2ph128_mask", + "llvm.x86.avx512fp16.mask.vcvtqq2ph.256" => "__builtin_ia32_vcvtqq2ph256_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask", + "llvm.x86.avx512fp16.mask.vcvttph2dq.128" => "__builtin_ia32_vcvttph2dq128_mask", + "llvm.x86.avx512fp16.mask.vcvttph2dq.256" => "__builtin_ia32_vcvttph2dq256_mask", + "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask", + "llvm.x86.avx512fp16.mask.vcvttph2qq.128" => "__builtin_ia32_vcvttph2qq128_mask", + "llvm.x86.avx512fp16.mask.vcvttph2qq.256" => "__builtin_ia32_vcvttph2qq256_mask", + "llvm.x86.avx512fp16.mask.vcvttph2qq.512" => "__builtin_ia32_vcvttph2qq512_mask", + "llvm.x86.avx512fp16.mask.vcvttph2udq.128" => "__builtin_ia32_vcvttph2udq128_mask", + "llvm.x86.avx512fp16.mask.vcvttph2udq.256" => "__builtin_ia32_vcvttph2udq256_mask", + "llvm.x86.avx512fp16.mask.vcvttph2udq.512" => "__builtin_ia32_vcvttph2udq512_mask", + "llvm.x86.avx512fp16.mask.vcvttph2uqq.128" => "__builtin_ia32_vcvttph2uqq128_mask", + "llvm.x86.avx512fp16.mask.vcvttph2uqq.256" => "__builtin_ia32_vcvttph2uqq256_mask", + "llvm.x86.avx512fp16.mask.vcvttph2uqq.512" => "__builtin_ia32_vcvttph2uqq512_mask", + "llvm.x86.avx512fp16.mask.vcvttph2uw.128" => "__builtin_ia32_vcvttph2uw128_mask", + "llvm.x86.avx512fp16.mask.vcvttph2uw.256" => "__builtin_ia32_vcvttph2uw256_mask", + "llvm.x86.avx512fp16.mask.vcvttph2uw.512" => "__builtin_ia32_vcvttph2uw512_mask", + "llvm.x86.avx512fp16.mask.vcvttph2w.128" => "__builtin_ia32_vcvttph2w128_mask", + 
"llvm.x86.avx512fp16.mask.vcvttph2w.256" => "__builtin_ia32_vcvttph2w256_mask", + "llvm.x86.avx512fp16.mask.vcvttph2w.512" => "__builtin_ia32_vcvttph2w512_mask", + "llvm.x86.avx512fp16.mask.vcvtudq2ph.128" => "__builtin_ia32_vcvtudq2ph128_mask", + "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128" => "__builtin_ia32_vcvtuqq2ph128_mask", + "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256" => "__builtin_ia32_vcvtuqq2ph256_mask", + "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask", + "llvm.x86.avx512fp16.mask.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_mask", + "llvm.x86.avx512fp16.mask.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_mask3", + "llvm.x86.avx512fp16.mask.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_mask", + "llvm.x86.avx512fp16.mask.vfcmul.cph.128" => "__builtin_ia32_vfcmulcph128_mask", + "llvm.x86.avx512fp16.mask.vfcmul.cph.256" => "__builtin_ia32_vfcmulcph256_mask", + "llvm.x86.avx512fp16.mask.vfcmul.cph.512" => "__builtin_ia32_vfcmulcph512_mask", + "llvm.x86.avx512fp16.mask.vfcmul.csh" => "__builtin_ia32_vfcmulcsh_mask", + "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask", + "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask", + "llvm.x86.avx512fp16.mask.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_mask3", + "llvm.x86.avx512fp16.mask.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_mask", + "llvm.x86.avx512fp16.mask.vfmul.cph.128" => "__builtin_ia32_vfmulcph128_mask", + "llvm.x86.avx512fp16.mask.vfmul.cph.256" => "__builtin_ia32_vfmulcph256_mask", + "llvm.x86.avx512fp16.mask.vfmul.cph.512" => "__builtin_ia32_vfmulcph512_mask", + "llvm.x86.avx512fp16.mask.vfmul.csh" => "__builtin_ia32_vfmulcsh_mask", + "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_maskz", + "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_maskz", + "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_maskz", + "llvm.x86.avx512fp16.maskz.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_maskz", + "llvm.x86.avx512fp16.maskz.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_maskz", + "llvm.x86.avx512fp16.maskz.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_maskz", + "llvm.x86.avx512fp16.maskz.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_maskz", + "llvm.x86.avx512fp16.maskz.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_maskz", + "llvm.x86.avx512fp16.max.ph.128" => "__builtin_ia32_maxph128", + "llvm.x86.avx512fp16.max.ph.256" => "__builtin_ia32_maxph256", + "llvm.x86.avx512fp16.max.ph.512" => "__builtin_ia32_maxph512", + "llvm.x86.avx512fp16.min.ph.128" => "__builtin_ia32_minph128", + "llvm.x86.avx512fp16.min.ph.256" => "__builtin_ia32_minph256", + "llvm.x86.avx512fp16.min.ph.512" => "__builtin_ia32_minph512", + "llvm.x86.avx512fp16.mul.ph.512" => "__builtin_ia32_mulph512", + "llvm.x86.avx512fp16.sub.ph.512" => "__builtin_ia32_subph512", + "llvm.x86.avx512fp16.vcomi.sh" => "__builtin_ia32_vcomish", + "llvm.x86.avx512fp16.vcvtsh2si32" => "__builtin_ia32_vcvtsh2si32", + "llvm.x86.avx512fp16.vcvtsh2si64" => "__builtin_ia32_vcvtsh2si64", + "llvm.x86.avx512fp16.vcvtsh2usi32" => "__builtin_ia32_vcvtsh2usi32", + "llvm.x86.avx512fp16.vcvtsh2usi64" => "__builtin_ia32_vcvtsh2usi64", + "llvm.x86.avx512fp16.vcvtsi2sh" => "__builtin_ia32_vcvtsi2sh", + "llvm.x86.avx512fp16.vcvtsi642sh" => "__builtin_ia32_vcvtsi642sh", + "llvm.x86.avx512fp16.vcvttsh2si32" => "__builtin_ia32_vcvttsh2si32", + "llvm.x86.avx512fp16.vcvttsh2si64" => "__builtin_ia32_vcvttsh2si64", + 
"llvm.x86.avx512fp16.vcvttsh2usi32" => "__builtin_ia32_vcvttsh2usi32", + "llvm.x86.avx512fp16.vcvttsh2usi64" => "__builtin_ia32_vcvttsh2usi64", + "llvm.x86.avx512fp16.vcvtusi2sh" => "__builtin_ia32_vcvtusi2sh", + "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi642sh", + "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph", + "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256", + "llvm.x86.axor32" => "__builtin_ia32_axor32", + "llvm.x86.axor64" => "__builtin_ia32_axor64", + "llvm.x86.bmi.bextr.32" => "__builtin_ia32_bextr_u32", + "llvm.x86.bmi.bextr.64" => "__builtin_ia32_bextr_u64", + "llvm.x86.bmi.bzhi.32" => "__builtin_ia32_bzhi_si", + "llvm.x86.bmi.bzhi.64" => "__builtin_ia32_bzhi_di", + "llvm.x86.bmi.pdep.32" => "__builtin_ia32_pdep_si", + "llvm.x86.bmi.pdep.64" => "__builtin_ia32_pdep_di", + "llvm.x86.bmi.pext.32" => "__builtin_ia32_pext_si", + "llvm.x86.bmi.pext.64" => "__builtin_ia32_pext_di", + "llvm.x86.cldemote" => "__builtin_ia32_cldemote", + "llvm.x86.clflushopt" => "__builtin_ia32_clflushopt", + "llvm.x86.clrssbsy" => "__builtin_ia32_clrssbsy", + "llvm.x86.clui" => "__builtin_ia32_clui", + "llvm.x86.clwb" => "__builtin_ia32_clwb", + "llvm.x86.clzero" => "__builtin_ia32_clzero", + "llvm.x86.cmpccxadd32" => "__builtin_ia32_cmpccxadd32", + "llvm.x86.cmpccxadd64" => "__builtin_ia32_cmpccxadd64", + "llvm.x86.directstore32" => "__builtin_ia32_directstore_u32", + "llvm.x86.directstore64" => "__builtin_ia32_directstore_u64", + "llvm.x86.enqcmd" => "__builtin_ia32_enqcmd", + "llvm.x86.enqcmds" => "__builtin_ia32_enqcmds", + "llvm.x86.flags.read.u32" => "__builtin_ia32_readeflags_u32", + "llvm.x86.flags.read.u64" => "__builtin_ia32_readeflags_u64", + "llvm.x86.flags.write.u32" => "__builtin_ia32_writeeflags_u32", + "llvm.x86.flags.write.u64" => "__builtin_ia32_writeeflags_u64", + "llvm.x86.fma.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask", + "llvm.x86.fma.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask", + "llvm.x86.fma.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask", + "llvm.x86.fma.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask", + "llvm.x86.fma.mask.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask", + "llvm.x86.fma.mask.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask", + "llvm.x86.fma.mask.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask", + "llvm.x86.fma.mask.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask", + "llvm.x86.fma.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask", + "llvm.x86.fma.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask", + "llvm.x86.fma.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask", + "llvm.x86.fma.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask", + "llvm.x86.fma.vfmadd.pd" => "__builtin_ia32_vfmaddpd", + "llvm.x86.fma.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256", + "llvm.x86.fma.vfmadd.ps" => "__builtin_ia32_vfmaddps", + "llvm.x86.fma.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256", + "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd", + "llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss", + "llvm.x86.fma.vfmaddsub.pd" => "__builtin_ia32_vfmaddsubpd", + "llvm.x86.fma.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256", + "llvm.x86.fma.vfmaddsub.ps" => "__builtin_ia32_vfmaddsubps", + "llvm.x86.fma.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256", + "llvm.x86.fma.vfmsub.pd" => "__builtin_ia32_vfmsubpd", + "llvm.x86.fma.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256", + "llvm.x86.fma.vfmsub.ps" 
=> "__builtin_ia32_vfmsubps", + "llvm.x86.fma.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256", + "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd", + "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss", + "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmsubaddpd", + "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256", + "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmsubaddps", + "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256", + "llvm.x86.fma.vfnmadd.pd" => "__builtin_ia32_vfnmaddpd", + "llvm.x86.fma.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256", + "llvm.x86.fma.vfnmadd.ps" => "__builtin_ia32_vfnmaddps", + "llvm.x86.fma.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256", + "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd", + "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss", + "llvm.x86.fma.vfnmsub.pd" => "__builtin_ia32_vfnmsubpd", + "llvm.x86.fma.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256", + "llvm.x86.fma.vfnmsub.ps" => "__builtin_ia32_vfnmsubps", + "llvm.x86.fma.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256", + "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd", + "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss", + "llvm.x86.fxrstor" => "__builtin_ia32_fxrstor", + "llvm.x86.fxrstor64" => "__builtin_ia32_fxrstor64", + "llvm.x86.fxsave" => "__builtin_ia32_fxsave", + "llvm.x86.fxsave64" => "__builtin_ia32_fxsave64", + "llvm.x86.incsspd" => "__builtin_ia32_incsspd", + "llvm.x86.incsspq" => "__builtin_ia32_incsspq", + "llvm.x86.invpcid" => "__builtin_ia32_invpcid", + "llvm.x86.ldtilecfg" => "__builtin_ia32_tile_loadconfig", + "llvm.x86.ldtilecfg.internal" => "__builtin_ia32_tile_loadconfig_internal", + "llvm.x86.llwpcb" => "__builtin_ia32_llwpcb", + "llvm.x86.loadiwkey" => "__builtin_ia32_loadiwkey", + "llvm.x86.lwpins32" => "__builtin_ia32_lwpins32", + "llvm.x86.lwpins64" => "__builtin_ia32_lwpins64", + "llvm.x86.lwpval32" => "__builtin_ia32_lwpval32", + "llvm.x86.lwpval64" => "__builtin_ia32_lwpval64", + "llvm.x86.mmx.emms" => "__builtin_ia32_emms", + "llvm.x86.mmx.femms" => "__builtin_ia32_femms", + "llvm.x86.mmx.maskmovq" => "__builtin_ia32_maskmovq", + "llvm.x86.mmx.movnt.dq" => "__builtin_ia32_movntq", + "llvm.x86.mmx.packssdw" => "__builtin_ia32_packssdw", + "llvm.x86.mmx.packsswb" => "__builtin_ia32_packsswb", + "llvm.x86.mmx.packuswb" => "__builtin_ia32_packuswb", + "llvm.x86.mmx.padd.b" => "__builtin_ia32_paddb", + "llvm.x86.mmx.padd.d" => "__builtin_ia32_paddd", + "llvm.x86.mmx.padd.q" => "__builtin_ia32_paddq", + "llvm.x86.mmx.padd.w" => "__builtin_ia32_paddw", + "llvm.x86.mmx.padds.b" => "__builtin_ia32_paddsb", + "llvm.x86.mmx.padds.w" => "__builtin_ia32_paddsw", + "llvm.x86.mmx.paddus.b" => "__builtin_ia32_paddusb", + "llvm.x86.mmx.paddus.w" => "__builtin_ia32_paddusw", + "llvm.x86.mmx.palignr.b" => "__builtin_ia32_palignr", + "llvm.x86.mmx.pand" => "__builtin_ia32_pand", + "llvm.x86.mmx.pandn" => "__builtin_ia32_pandn", + "llvm.x86.mmx.pavg.b" => "__builtin_ia32_pavgb", + "llvm.x86.mmx.pavg.w" => "__builtin_ia32_pavgw", + "llvm.x86.mmx.pcmpeq.b" => "__builtin_ia32_pcmpeqb", + "llvm.x86.mmx.pcmpeq.d" => "__builtin_ia32_pcmpeqd", + "llvm.x86.mmx.pcmpeq.w" => "__builtin_ia32_pcmpeqw", + "llvm.x86.mmx.pcmpgt.b" => "__builtin_ia32_pcmpgtb", + "llvm.x86.mmx.pcmpgt.d" => "__builtin_ia32_pcmpgtd", + "llvm.x86.mmx.pcmpgt.w" => "__builtin_ia32_pcmpgtw", + "llvm.x86.mmx.pextr.w" => "__builtin_ia32_vec_ext_v4hi", + "llvm.x86.mmx.pinsr.w" => "__builtin_ia32_vec_set_v4hi", + "llvm.x86.mmx.pmadd.wd" => 
"__builtin_ia32_pmaddwd", + "llvm.x86.mmx.pmaxs.w" => "__builtin_ia32_pmaxsw", + "llvm.x86.mmx.pmaxu.b" => "__builtin_ia32_pmaxub", + "llvm.x86.mmx.pmins.w" => "__builtin_ia32_pminsw", + "llvm.x86.mmx.pminu.b" => "__builtin_ia32_pminub", + "llvm.x86.mmx.pmovmskb" => "__builtin_ia32_pmovmskb", + "llvm.x86.mmx.pmulh.w" => "__builtin_ia32_pmulhw", + "llvm.x86.mmx.pmulhu.w" => "__builtin_ia32_pmulhuw", + "llvm.x86.mmx.pmull.w" => "__builtin_ia32_pmullw", + "llvm.x86.mmx.pmulu.dq" => "__builtin_ia32_pmuludq", + "llvm.x86.mmx.por" => "__builtin_ia32_por", + "llvm.x86.mmx.psad.bw" => "__builtin_ia32_psadbw", + "llvm.x86.mmx.psll.d" => "__builtin_ia32_pslld", + "llvm.x86.mmx.psll.q" => "__builtin_ia32_psllq", + "llvm.x86.mmx.psll.w" => "__builtin_ia32_psllw", + "llvm.x86.mmx.pslli.d" => "__builtin_ia32_pslldi", + "llvm.x86.mmx.pslli.q" => "__builtin_ia32_psllqi", + "llvm.x86.mmx.pslli.w" => "__builtin_ia32_psllwi", + "llvm.x86.mmx.psra.d" => "__builtin_ia32_psrad", + "llvm.x86.mmx.psra.w" => "__builtin_ia32_psraw", + "llvm.x86.mmx.psrai.d" => "__builtin_ia32_psradi", + "llvm.x86.mmx.psrai.w" => "__builtin_ia32_psrawi", + "llvm.x86.mmx.psrl.d" => "__builtin_ia32_psrld", + "llvm.x86.mmx.psrl.q" => "__builtin_ia32_psrlq", + "llvm.x86.mmx.psrl.w" => "__builtin_ia32_psrlw", + "llvm.x86.mmx.psrli.d" => "__builtin_ia32_psrldi", + "llvm.x86.mmx.psrli.q" => "__builtin_ia32_psrlqi", + "llvm.x86.mmx.psrli.w" => "__builtin_ia32_psrlwi", + "llvm.x86.mmx.psub.b" => "__builtin_ia32_psubb", + "llvm.x86.mmx.psub.d" => "__builtin_ia32_psubd", + "llvm.x86.mmx.psub.q" => "__builtin_ia32_psubq", + "llvm.x86.mmx.psub.w" => "__builtin_ia32_psubw", + "llvm.x86.mmx.psubs.b" => "__builtin_ia32_psubsb", + "llvm.x86.mmx.psubs.w" => "__builtin_ia32_psubsw", + "llvm.x86.mmx.psubus.b" => "__builtin_ia32_psubusb", + "llvm.x86.mmx.psubus.w" => "__builtin_ia32_psubusw", + "llvm.x86.mmx.punpckhbw" => "__builtin_ia32_punpckhbw", + "llvm.x86.mmx.punpckhdq" => "__builtin_ia32_punpckhdq", + "llvm.x86.mmx.punpckhwd" => "__builtin_ia32_punpckhwd", + "llvm.x86.mmx.punpcklbw" => "__builtin_ia32_punpcklbw", + "llvm.x86.mmx.punpckldq" => "__builtin_ia32_punpckldq", + "llvm.x86.mmx.punpcklwd" => "__builtin_ia32_punpcklwd", + "llvm.x86.mmx.pxor" => "__builtin_ia32_pxor", + "llvm.x86.monitorx" => "__builtin_ia32_monitorx", + "llvm.x86.movdir64b" => "__builtin_ia32_movdir64b", + "llvm.x86.mwaitx" => "__builtin_ia32_mwaitx", + "llvm.x86.pclmulqdq" => "__builtin_ia32_pclmulqdq128", + "llvm.x86.pclmulqdq.256" => "__builtin_ia32_pclmulqdq256", + "llvm.x86.pclmulqdq.512" => "__builtin_ia32_pclmulqdq512", + "llvm.x86.ptwrite32" => "__builtin_ia32_ptwrite32", + "llvm.x86.ptwrite64" => "__builtin_ia32_ptwrite64", + "llvm.x86.rdfsbase.32" => "__builtin_ia32_rdfsbase32", + "llvm.x86.rdfsbase.64" => "__builtin_ia32_rdfsbase64", + "llvm.x86.rdgsbase.32" => "__builtin_ia32_rdgsbase32", + "llvm.x86.rdgsbase.64" => "__builtin_ia32_rdgsbase64", + "llvm.x86.rdpid" => "__builtin_ia32_rdpid", + "llvm.x86.rdpkru" => "__builtin_ia32_rdpkru", + "llvm.x86.rdpmc" => "__builtin_ia32_rdpmc", + "llvm.x86.rdpru" => "__builtin_ia32_rdpru", + "llvm.x86.rdsspd" => "__builtin_ia32_rdsspd", + "llvm.x86.rdsspq" => "__builtin_ia32_rdsspq", + "llvm.x86.rdtsc" => "__builtin_ia32_rdtsc", + "llvm.x86.rdtscp" => "__builtin_ia32_rdtscp", + "llvm.x86.rstorssp" => "__builtin_ia32_rstorssp", + "llvm.x86.saveprevssp" => "__builtin_ia32_saveprevssp", + "llvm.x86.senduipi" => "__builtin_ia32_senduipi", + "llvm.x86.serialize" => "__builtin_ia32_serialize", + "llvm.x86.setssbsy" => 
"__builtin_ia32_setssbsy", + "llvm.x86.sha1msg1" => "__builtin_ia32_sha1msg1", + "llvm.x86.sha1msg2" => "__builtin_ia32_sha1msg2", + "llvm.x86.sha1nexte" => "__builtin_ia32_sha1nexte", + "llvm.x86.sha1rnds4" => "__builtin_ia32_sha1rnds4", + "llvm.x86.sha256msg1" => "__builtin_ia32_sha256msg1", + "llvm.x86.sha256msg2" => "__builtin_ia32_sha256msg2", + "llvm.x86.sha256rnds2" => "__builtin_ia32_sha256rnds2", + "llvm.x86.slwpcb" => "__builtin_ia32_slwpcb", + "llvm.x86.sse.add.ss" => "__builtin_ia32_addss", + "llvm.x86.sse.cmp.ps" => "__builtin_ia32_cmpps", + "llvm.x86.sse.cmp.ss" => "__builtin_ia32_cmpss", + "llvm.x86.sse.comieq.ss" => "__builtin_ia32_comieq", + "llvm.x86.sse.comige.ss" => "__builtin_ia32_comige", + "llvm.x86.sse.comigt.ss" => "__builtin_ia32_comigt", + "llvm.x86.sse.comile.ss" => "__builtin_ia32_comile", + "llvm.x86.sse.comilt.ss" => "__builtin_ia32_comilt", + "llvm.x86.sse.comineq.ss" => "__builtin_ia32_comineq", + "llvm.x86.sse.cvtpd2pi" => "__builtin_ia32_cvtpd2pi", + "llvm.x86.sse.cvtpi2pd" => "__builtin_ia32_cvtpi2pd", + "llvm.x86.sse.cvtpi2ps" => "__builtin_ia32_cvtpi2ps", + "llvm.x86.sse.cvtps2pi" => "__builtin_ia32_cvtps2pi", + "llvm.x86.sse.cvtsi2ss" => "__builtin_ia32_cvtsi2ss", + "llvm.x86.sse.cvtsi642ss" => "__builtin_ia32_cvtsi642ss", + "llvm.x86.sse.cvtss2si" => "__builtin_ia32_cvtss2si", + "llvm.x86.sse.cvtss2si64" => "__builtin_ia32_cvtss2si64", + "llvm.x86.sse.cvttpd2pi" => "__builtin_ia32_cvttpd2pi", + "llvm.x86.sse.cvttps2pi" => "__builtin_ia32_cvttps2pi", + "llvm.x86.sse.cvttss2si" => "__builtin_ia32_cvttss2si", + "llvm.x86.sse.cvttss2si64" => "__builtin_ia32_cvttss2si64", + "llvm.x86.sse.div.ss" => "__builtin_ia32_divss", + "llvm.x86.sse.max.ps" => "__builtin_ia32_maxps", + "llvm.x86.sse.max.ss" => "__builtin_ia32_maxss", + "llvm.x86.sse.min.ps" => "__builtin_ia32_minps", + "llvm.x86.sse.min.ss" => "__builtin_ia32_minss", + "llvm.x86.sse.movmsk.ps" => "__builtin_ia32_movmskps", + "llvm.x86.sse.mul.ss" => "__builtin_ia32_mulss", + "llvm.x86.sse.pshuf.w" => "__builtin_ia32_pshufw", + "llvm.x86.sse.rcp.ps" => "__builtin_ia32_rcpps", + "llvm.x86.sse.rcp.ss" => "__builtin_ia32_rcpss", + "llvm.x86.sse.rsqrt.ps" => "__builtin_ia32_rsqrtps", + "llvm.x86.sse.rsqrt.ss" => "__builtin_ia32_rsqrtss", + "llvm.x86.sse.sfence" => "__builtin_ia32_sfence", + "llvm.x86.sse.sqrt.ps" => "__builtin_ia32_sqrtps", + "llvm.x86.sse.sqrt.ss" => "__builtin_ia32_sqrtss", + "llvm.x86.sse.storeu.ps" => "__builtin_ia32_storeups", + "llvm.x86.sse.sub.ss" => "__builtin_ia32_subss", + "llvm.x86.sse.ucomieq.ss" => "__builtin_ia32_ucomieq", + "llvm.x86.sse.ucomige.ss" => "__builtin_ia32_ucomige", + "llvm.x86.sse.ucomigt.ss" => "__builtin_ia32_ucomigt", + "llvm.x86.sse.ucomile.ss" => "__builtin_ia32_ucomile", + "llvm.x86.sse.ucomilt.ss" => "__builtin_ia32_ucomilt", + "llvm.x86.sse.ucomineq.ss" => "__builtin_ia32_ucomineq", + "llvm.x86.sse2.add.sd" => "__builtin_ia32_addsd", + "llvm.x86.sse2.clflush" => "__builtin_ia32_clflush", + "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd", + "llvm.x86.sse2.cmp.sd" => "__builtin_ia32_cmpsd", + "llvm.x86.sse2.comieq.sd" => "__builtin_ia32_comisdeq", + "llvm.x86.sse2.comige.sd" => "__builtin_ia32_comisdge", + "llvm.x86.sse2.comigt.sd" => "__builtin_ia32_comisdgt", + "llvm.x86.sse2.comile.sd" => "__builtin_ia32_comisdle", + "llvm.x86.sse2.comilt.sd" => "__builtin_ia32_comisdlt", + "llvm.x86.sse2.comineq.sd" => "__builtin_ia32_comisdneq", + "llvm.x86.sse2.cvtdq2pd" => "__builtin_ia32_cvtdq2pd", + "llvm.x86.sse2.cvtdq2ps" => "__builtin_ia32_cvtdq2ps", + 
"llvm.x86.sse2.cvtpd2dq" => "__builtin_ia32_cvtpd2dq", + "llvm.x86.sse2.cvtpd2ps" => "__builtin_ia32_cvtpd2ps", + "llvm.x86.sse2.cvtps2dq" => "__builtin_ia32_cvtps2dq", + "llvm.x86.sse2.cvtps2pd" => "__builtin_ia32_cvtps2pd", + "llvm.x86.sse2.cvtsd2si" => "__builtin_ia32_cvtsd2si", + "llvm.x86.sse2.cvtsd2si64" => "__builtin_ia32_cvtsd2si64", + "llvm.x86.sse2.cvtsd2ss" => "__builtin_ia32_cvtsd2ss", + "llvm.x86.sse2.cvtsi2sd" => "__builtin_ia32_cvtsi2sd", + "llvm.x86.sse2.cvtsi642sd" => "__builtin_ia32_cvtsi642sd", + "llvm.x86.sse2.cvtss2sd" => "__builtin_ia32_cvtss2sd", + "llvm.x86.sse2.cvttpd2dq" => "__builtin_ia32_cvttpd2dq", + "llvm.x86.sse2.cvttps2dq" => "__builtin_ia32_cvttps2dq", + "llvm.x86.sse2.cvttsd2si" => "__builtin_ia32_cvttsd2si", + "llvm.x86.sse2.cvttsd2si64" => "__builtin_ia32_cvttsd2si64", + "llvm.x86.sse2.div.sd" => "__builtin_ia32_divsd", + "llvm.x86.sse2.lfence" => "__builtin_ia32_lfence", + "llvm.x86.sse2.maskmov.dqu" => "__builtin_ia32_maskmovdqu", + "llvm.x86.sse2.max.pd" => "__builtin_ia32_maxpd", + "llvm.x86.sse2.max.sd" => "__builtin_ia32_maxsd", + "llvm.x86.sse2.mfence" => "__builtin_ia32_mfence", + "llvm.x86.sse2.min.pd" => "__builtin_ia32_minpd", + "llvm.x86.sse2.min.sd" => "__builtin_ia32_minsd", + "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd", + "llvm.x86.sse2.mul.sd" => "__builtin_ia32_mulsd", + "llvm.x86.sse2.packssdw.128" => "__builtin_ia32_packssdw128", + "llvm.x86.sse2.packsswb.128" => "__builtin_ia32_packsswb128", + "llvm.x86.sse2.packuswb.128" => "__builtin_ia32_packuswb128", + "llvm.x86.sse2.padds.b" => "__builtin_ia32_paddsb128", + "llvm.x86.sse2.padds.w" => "__builtin_ia32_paddsw128", + "llvm.x86.sse2.paddus.b" => "__builtin_ia32_paddusb128", + "llvm.x86.sse2.paddus.w" => "__builtin_ia32_paddusw128", + "llvm.x86.sse2.pause" => "__builtin_ia32_pause", + "llvm.x86.sse2.pavg.b" => "__builtin_ia32_pavgb128", + "llvm.x86.sse2.pavg.w" => "__builtin_ia32_pavgw128", + "llvm.x86.sse2.pmadd.wd" => "__builtin_ia32_pmaddwd128", + "llvm.x86.sse2.pmaxs.w" => "__builtin_ia32_pmaxsw128", + "llvm.x86.sse2.pmaxu.b" => "__builtin_ia32_pmaxub128", + "llvm.x86.sse2.pmins.w" => "__builtin_ia32_pminsw128", + "llvm.x86.sse2.pminu.b" => "__builtin_ia32_pminub128", + "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128", + "llvm.x86.sse2.pmulh.w" => "__builtin_ia32_pmulhw128", + "llvm.x86.sse2.pmulhu.w" => "__builtin_ia32_pmulhuw128", + "llvm.x86.sse2.pmulu.dq" => "__builtin_ia32_pmuludq128", + "llvm.x86.sse2.psad.bw" => "__builtin_ia32_psadbw128", + "llvm.x86.sse2.pshuf.d" => "__builtin_ia32_pshufd", + "llvm.x86.sse2.pshufh.w" => "__builtin_ia32_pshufhw", + "llvm.x86.sse2.pshufl.w" => "__builtin_ia32_pshuflw", + "llvm.x86.sse2.psll.d" => "__builtin_ia32_pslld128", + "llvm.x86.sse2.psll.dq" => "__builtin_ia32_pslldqi128", + "llvm.x86.sse2.psll.dq.bs" => "__builtin_ia32_pslldqi128_byteshift", + "llvm.x86.sse2.psll.q" => "__builtin_ia32_psllq128", + "llvm.x86.sse2.psll.w" => "__builtin_ia32_psllw128", + "llvm.x86.sse2.pslli.d" => "__builtin_ia32_pslldi128", + "llvm.x86.sse2.pslli.q" => "__builtin_ia32_psllqi128", + "llvm.x86.sse2.pslli.w" => "__builtin_ia32_psllwi128", + "llvm.x86.sse2.psra.d" => "__builtin_ia32_psrad128", + "llvm.x86.sse2.psra.w" => "__builtin_ia32_psraw128", + "llvm.x86.sse2.psrai.d" => "__builtin_ia32_psradi128", + "llvm.x86.sse2.psrai.w" => "__builtin_ia32_psrawi128", + "llvm.x86.sse2.psrl.d" => "__builtin_ia32_psrld128", + "llvm.x86.sse2.psrl.dq" => "__builtin_ia32_psrldqi128", + "llvm.x86.sse2.psrl.dq.bs" => 
"__builtin_ia32_psrldqi128_byteshift", + "llvm.x86.sse2.psrl.q" => "__builtin_ia32_psrlq128", + "llvm.x86.sse2.psrl.w" => "__builtin_ia32_psrlw128", + "llvm.x86.sse2.psrli.d" => "__builtin_ia32_psrldi128", + "llvm.x86.sse2.psrli.q" => "__builtin_ia32_psrlqi128", + "llvm.x86.sse2.psrli.w" => "__builtin_ia32_psrlwi128", + "llvm.x86.sse2.psubs.b" => "__builtin_ia32_psubsb128", + "llvm.x86.sse2.psubs.w" => "__builtin_ia32_psubsw128", + "llvm.x86.sse2.psubus.b" => "__builtin_ia32_psubusb128", + "llvm.x86.sse2.psubus.w" => "__builtin_ia32_psubusw128", + "llvm.x86.sse2.sqrt.pd" => "__builtin_ia32_sqrtpd", + "llvm.x86.sse2.sqrt.sd" => "__builtin_ia32_sqrtsd", + "llvm.x86.sse2.storel.dq" => "__builtin_ia32_storelv4si", + "llvm.x86.sse2.storeu.dq" => "__builtin_ia32_storedqu", + "llvm.x86.sse2.storeu.pd" => "__builtin_ia32_storeupd", + "llvm.x86.sse2.sub.sd" => "__builtin_ia32_subsd", + "llvm.x86.sse2.ucomieq.sd" => "__builtin_ia32_ucomisdeq", + "llvm.x86.sse2.ucomige.sd" => "__builtin_ia32_ucomisdge", + "llvm.x86.sse2.ucomigt.sd" => "__builtin_ia32_ucomisdgt", + "llvm.x86.sse2.ucomile.sd" => "__builtin_ia32_ucomisdle", + "llvm.x86.sse2.ucomilt.sd" => "__builtin_ia32_ucomisdlt", + "llvm.x86.sse2.ucomineq.sd" => "__builtin_ia32_ucomisdneq", + "llvm.x86.sse3.addsub.pd" => "__builtin_ia32_addsubpd", + "llvm.x86.sse3.addsub.ps" => "__builtin_ia32_addsubps", + "llvm.x86.sse3.hadd.pd" => "__builtin_ia32_haddpd", + "llvm.x86.sse3.hadd.ps" => "__builtin_ia32_haddps", + "llvm.x86.sse3.hsub.pd" => "__builtin_ia32_hsubpd", + "llvm.x86.sse3.hsub.ps" => "__builtin_ia32_hsubps", + "llvm.x86.sse3.ldu.dq" => "__builtin_ia32_lddqu", + "llvm.x86.sse3.monitor" => "__builtin_ia32_monitor", + "llvm.x86.sse3.mwait" => "__builtin_ia32_mwait", + "llvm.x86.sse41.blendpd" => "__builtin_ia32_blendpd", + "llvm.x86.sse41.blendps" => "__builtin_ia32_blendps", + "llvm.x86.sse41.blendvpd" => "__builtin_ia32_blendvpd", + "llvm.x86.sse41.blendvps" => "__builtin_ia32_blendvps", + "llvm.x86.sse41.dppd" => "__builtin_ia32_dppd", + "llvm.x86.sse41.dpps" => "__builtin_ia32_dpps", + "llvm.x86.sse41.extractps" => "__builtin_ia32_extractps128", + "llvm.x86.sse41.insertps" => "__builtin_ia32_insertps128", + "llvm.x86.sse41.movntdqa" => "__builtin_ia32_movntdqa", + "llvm.x86.sse41.mpsadbw" => "__builtin_ia32_mpsadbw128", + "llvm.x86.sse41.packusdw" => "__builtin_ia32_packusdw128", + "llvm.x86.sse41.pblendvb" => "__builtin_ia32_pblendvb128", + "llvm.x86.sse41.pblendw" => "__builtin_ia32_pblendw128", + "llvm.x86.sse41.phminposuw" => "__builtin_ia32_phminposuw128", + "llvm.x86.sse41.pmaxsb" => "__builtin_ia32_pmaxsb128", + "llvm.x86.sse41.pmaxsd" => "__builtin_ia32_pmaxsd128", + "llvm.x86.sse41.pmaxud" => "__builtin_ia32_pmaxud128", + "llvm.x86.sse41.pmaxuw" => "__builtin_ia32_pmaxuw128", + "llvm.x86.sse41.pminsb" => "__builtin_ia32_pminsb128", + "llvm.x86.sse41.pminsd" => "__builtin_ia32_pminsd128", + "llvm.x86.sse41.pminud" => "__builtin_ia32_pminud128", + "llvm.x86.sse41.pminuw" => "__builtin_ia32_pminuw128", + "llvm.x86.sse41.pmovsxbd" => "__builtin_ia32_pmovsxbd128", + "llvm.x86.sse41.pmovsxbq" => "__builtin_ia32_pmovsxbq128", + "llvm.x86.sse41.pmovsxbw" => "__builtin_ia32_pmovsxbw128", + "llvm.x86.sse41.pmovsxdq" => "__builtin_ia32_pmovsxdq128", + "llvm.x86.sse41.pmovsxwd" => "__builtin_ia32_pmovsxwd128", + "llvm.x86.sse41.pmovsxwq" => "__builtin_ia32_pmovsxwq128", + "llvm.x86.sse41.pmovzxbd" => "__builtin_ia32_pmovzxbd128", + "llvm.x86.sse41.pmovzxbq" => "__builtin_ia32_pmovzxbq128", + "llvm.x86.sse41.pmovzxbw" => 
"__builtin_ia32_pmovzxbw128", + "llvm.x86.sse41.pmovzxdq" => "__builtin_ia32_pmovzxdq128", + "llvm.x86.sse41.pmovzxwd" => "__builtin_ia32_pmovzxwd128", + "llvm.x86.sse41.pmovzxwq" => "__builtin_ia32_pmovzxwq128", + "llvm.x86.sse41.pmuldq" => "__builtin_ia32_pmuldq128", + "llvm.x86.sse41.ptestc" => "__builtin_ia32_ptestc128", + "llvm.x86.sse41.ptestnzc" => "__builtin_ia32_ptestnzc128", + "llvm.x86.sse41.ptestz" => "__builtin_ia32_ptestz128", + "llvm.x86.sse41.round.pd" => "__builtin_ia32_roundpd", + "llvm.x86.sse41.round.ps" => "__builtin_ia32_roundps", + "llvm.x86.sse41.round.sd" => "__builtin_ia32_roundsd", + "llvm.x86.sse41.round.ss" => "__builtin_ia32_roundss", + "llvm.x86.sse42.crc32.32.16" => "__builtin_ia32_crc32hi", + "llvm.x86.sse42.crc32.32.32" => "__builtin_ia32_crc32si", + "llvm.x86.sse42.crc32.32.8" => "__builtin_ia32_crc32qi", + "llvm.x86.sse42.crc32.64.64" => "__builtin_ia32_crc32di", + "llvm.x86.sse42.pcmpestri128" => "__builtin_ia32_pcmpestri128", + "llvm.x86.sse42.pcmpestria128" => "__builtin_ia32_pcmpestria128", + "llvm.x86.sse42.pcmpestric128" => "__builtin_ia32_pcmpestric128", + "llvm.x86.sse42.pcmpestrio128" => "__builtin_ia32_pcmpestrio128", + "llvm.x86.sse42.pcmpestris128" => "__builtin_ia32_pcmpestris128", + "llvm.x86.sse42.pcmpestriz128" => "__builtin_ia32_pcmpestriz128", + "llvm.x86.sse42.pcmpestrm128" => "__builtin_ia32_pcmpestrm128", + "llvm.x86.sse42.pcmpistri128" => "__builtin_ia32_pcmpistri128", + "llvm.x86.sse42.pcmpistria128" => "__builtin_ia32_pcmpistria128", + "llvm.x86.sse42.pcmpistric128" => "__builtin_ia32_pcmpistric128", + "llvm.x86.sse42.pcmpistrio128" => "__builtin_ia32_pcmpistrio128", + "llvm.x86.sse42.pcmpistris128" => "__builtin_ia32_pcmpistris128", + "llvm.x86.sse42.pcmpistriz128" => "__builtin_ia32_pcmpistriz128", + "llvm.x86.sse42.pcmpistrm128" => "__builtin_ia32_pcmpistrm128", + "llvm.x86.sse4a.extrq" => "__builtin_ia32_extrq", + "llvm.x86.sse4a.extrqi" => "__builtin_ia32_extrqi", + "llvm.x86.sse4a.insertq" => "__builtin_ia32_insertq", + "llvm.x86.sse4a.insertqi" => "__builtin_ia32_insertqi", + "llvm.x86.sse4a.movnt.sd" => "__builtin_ia32_movntsd", + "llvm.x86.sse4a.movnt.ss" => "__builtin_ia32_movntss", + "llvm.x86.ssse3.pabs.b" => "__builtin_ia32_pabsb", + "llvm.x86.ssse3.pabs.b.128" => "__builtin_ia32_pabsb128", + "llvm.x86.ssse3.pabs.d" => "__builtin_ia32_pabsd", + "llvm.x86.ssse3.pabs.d.128" => "__builtin_ia32_pabsd128", + "llvm.x86.ssse3.pabs.w" => "__builtin_ia32_pabsw", + "llvm.x86.ssse3.pabs.w.128" => "__builtin_ia32_pabsw128", + "llvm.x86.ssse3.phadd.d" => "__builtin_ia32_phaddd", + "llvm.x86.ssse3.phadd.d.128" => "__builtin_ia32_phaddd128", + "llvm.x86.ssse3.phadd.sw" => "__builtin_ia32_phaddsw", + "llvm.x86.ssse3.phadd.sw.128" => "__builtin_ia32_phaddsw128", + "llvm.x86.ssse3.phadd.w" => "__builtin_ia32_phaddw", + "llvm.x86.ssse3.phadd.w.128" => "__builtin_ia32_phaddw128", + "llvm.x86.ssse3.phsub.d" => "__builtin_ia32_phsubd", + "llvm.x86.ssse3.phsub.d.128" => "__builtin_ia32_phsubd128", + "llvm.x86.ssse3.phsub.sw" => "__builtin_ia32_phsubsw", + "llvm.x86.ssse3.phsub.sw.128" => "__builtin_ia32_phsubsw128", + "llvm.x86.ssse3.phsub.w" => "__builtin_ia32_phsubw", + "llvm.x86.ssse3.phsub.w.128" => "__builtin_ia32_phsubw128", + "llvm.x86.ssse3.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw", + "llvm.x86.ssse3.pmadd.ub.sw.128" => "__builtin_ia32_pmaddubsw128", + "llvm.x86.ssse3.pmul.hr.sw" => "__builtin_ia32_pmulhrsw", + "llvm.x86.ssse3.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128", + "llvm.x86.ssse3.pshuf.b" => "__builtin_ia32_pshufb", + 
"llvm.x86.ssse3.pshuf.b.128" => "__builtin_ia32_pshufb128", + "llvm.x86.ssse3.psign.b" => "__builtin_ia32_psignb", + "llvm.x86.ssse3.psign.b.128" => "__builtin_ia32_psignb128", + "llvm.x86.ssse3.psign.d" => "__builtin_ia32_psignd", + "llvm.x86.ssse3.psign.d.128" => "__builtin_ia32_psignd128", + "llvm.x86.ssse3.psign.w" => "__builtin_ia32_psignw", + "llvm.x86.ssse3.psign.w.128" => "__builtin_ia32_psignw128", + "llvm.x86.sttilecfg" => "__builtin_ia32_tile_storeconfig", + "llvm.x86.stui" => "__builtin_ia32_stui", + "llvm.x86.subborrow.u32" => "__builtin_ia32_subborrow_u32", + "llvm.x86.subborrow.u64" => "__builtin_ia32_subborrow_u64", + "llvm.x86.tbm.bextri.u32" => "__builtin_ia32_bextri_u32", + "llvm.x86.tbm.bextri.u64" => "__builtin_ia32_bextri_u64", + "llvm.x86.tdpbf16ps" => "__builtin_ia32_tdpbf16ps", + "llvm.x86.tdpbf16ps.internal" => "__builtin_ia32_tdpbf16ps_internal", + "llvm.x86.tdpbssd" => "__builtin_ia32_tdpbssd", + "llvm.x86.tdpbssd.internal" => "__builtin_ia32_tdpbssd_internal", + "llvm.x86.tdpbsud" => "__builtin_ia32_tdpbsud", + "llvm.x86.tdpbsud.internal" => "__builtin_ia32_tdpbsud_internal", + "llvm.x86.tdpbusd" => "__builtin_ia32_tdpbusd", + "llvm.x86.tdpbusd.internal" => "__builtin_ia32_tdpbusd_internal", + "llvm.x86.tdpbuud" => "__builtin_ia32_tdpbuud", + "llvm.x86.tdpbuud.internal" => "__builtin_ia32_tdpbuud_internal", + "llvm.x86.tdpfp16ps" => "__builtin_ia32_tdpfp16ps", + "llvm.x86.tdpfp16ps.internal" => "__builtin_ia32_tdpfp16ps_internal", + "llvm.x86.testui" => "__builtin_ia32_testui", + "llvm.x86.tileloadd64" => "__builtin_ia32_tileloadd64", + "llvm.x86.tileloadd64.internal" => "__builtin_ia32_tileloadd64_internal", + "llvm.x86.tileloaddt164" => "__builtin_ia32_tileloaddt164", + "llvm.x86.tileloaddt164.internal" => "__builtin_ia32_tileloaddt164_internal", + "llvm.x86.tilerelease" => "__builtin_ia32_tilerelease", + "llvm.x86.tilestored64" => "__builtin_ia32_tilestored64", + "llvm.x86.tilestored64.internal" => "__builtin_ia32_tilestored64_internal", + "llvm.x86.tilezero" => "__builtin_ia32_tilezero", + "llvm.x86.tilezero.internal" => "__builtin_ia32_tilezero_internal", + "llvm.x86.tpause" => "__builtin_ia32_tpause", + "llvm.x86.umonitor" => "__builtin_ia32_umonitor", + "llvm.x86.umwait" => "__builtin_ia32_umwait", + "llvm.x86.vbcstnebf162ps128" => "__builtin_ia32_vbcstnebf162ps128", + "llvm.x86.vbcstnebf162ps256" => "__builtin_ia32_vbcstnebf162ps256", + "llvm.x86.vbcstnesh2ps128" => "__builtin_ia32_vbcstnesh2ps128", + "llvm.x86.vbcstnesh2ps256" => "__builtin_ia32_vbcstnesh2ps256", + "llvm.x86.vcvtneebf162ps128" => "__builtin_ia32_vcvtneebf162ps128", + "llvm.x86.vcvtneebf162ps256" => "__builtin_ia32_vcvtneebf162ps256", + "llvm.x86.vcvtneeph2ps128" => "__builtin_ia32_vcvtneeph2ps128", + "llvm.x86.vcvtneeph2ps256" => "__builtin_ia32_vcvtneeph2ps256", + "llvm.x86.vcvtneobf162ps128" => "__builtin_ia32_vcvtneobf162ps128", + "llvm.x86.vcvtneobf162ps256" => "__builtin_ia32_vcvtneobf162ps256", + "llvm.x86.vcvtneoph2ps128" => "__builtin_ia32_vcvtneoph2ps128", + "llvm.x86.vcvtneoph2ps256" => "__builtin_ia32_vcvtneoph2ps256", + "llvm.x86.vcvtneps2bf16128" => "__builtin_ia32_vcvtneps2bf16128", + "llvm.x86.vcvtneps2bf16256" => "__builtin_ia32_vcvtneps2bf16256", + "llvm.x86.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps", + "llvm.x86.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256", + "llvm.x86.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph", + "llvm.x86.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256", + "llvm.x86.vgf2p8affineinvqb.128" => "__builtin_ia32_vgf2p8affineinvqb_v16qi", + 
"llvm.x86.vgf2p8affineinvqb.256" => "__builtin_ia32_vgf2p8affineinvqb_v32qi", + "llvm.x86.vgf2p8affineinvqb.512" => "__builtin_ia32_vgf2p8affineinvqb_v64qi", + "llvm.x86.vgf2p8affineqb.128" => "__builtin_ia32_vgf2p8affineqb_v16qi", + "llvm.x86.vgf2p8affineqb.256" => "__builtin_ia32_vgf2p8affineqb_v32qi", + "llvm.x86.vgf2p8affineqb.512" => "__builtin_ia32_vgf2p8affineqb_v64qi", + "llvm.x86.vgf2p8mulb.128" => "__builtin_ia32_vgf2p8mulb_v16qi", + "llvm.x86.vgf2p8mulb.256" => "__builtin_ia32_vgf2p8mulb_v32qi", + "llvm.x86.vgf2p8mulb.512" => "__builtin_ia32_vgf2p8mulb_v64qi", + "llvm.x86.wbinvd" => "__builtin_ia32_wbinvd", + "llvm.x86.wbnoinvd" => "__builtin_ia32_wbnoinvd", + "llvm.x86.wrfsbase.32" => "__builtin_ia32_wrfsbase32", + "llvm.x86.wrfsbase.64" => "__builtin_ia32_wrfsbase64", + "llvm.x86.wrgsbase.32" => "__builtin_ia32_wrgsbase32", + "llvm.x86.wrgsbase.64" => "__builtin_ia32_wrgsbase64", + "llvm.x86.wrpkru" => "__builtin_ia32_wrpkru", + "llvm.x86.wrssd" => "__builtin_ia32_wrssd", + "llvm.x86.wrssq" => "__builtin_ia32_wrssq", + "llvm.x86.wrussd" => "__builtin_ia32_wrussd", + "llvm.x86.wrussq" => "__builtin_ia32_wrussq", + "llvm.x86.xabort" => "__builtin_ia32_xabort", + "llvm.x86.xbegin" => "__builtin_ia32_xbegin", + "llvm.x86.xend" => "__builtin_ia32_xend", + "llvm.x86.xop.vfrcz.pd" => "__builtin_ia32_vfrczpd", + "llvm.x86.xop.vfrcz.pd.256" => "__builtin_ia32_vfrczpd256", + "llvm.x86.xop.vfrcz.ps" => "__builtin_ia32_vfrczps", + "llvm.x86.xop.vfrcz.ps.256" => "__builtin_ia32_vfrczps256", + "llvm.x86.xop.vfrcz.sd" => "__builtin_ia32_vfrczsd", + "llvm.x86.xop.vfrcz.ss" => "__builtin_ia32_vfrczss", + "llvm.x86.xop.vpcmov" => "__builtin_ia32_vpcmov", + "llvm.x86.xop.vpcmov.256" => "__builtin_ia32_vpcmov_256", + "llvm.x86.xop.vpcomb" => "__builtin_ia32_vpcomb", + "llvm.x86.xop.vpcomd" => "__builtin_ia32_vpcomd", + "llvm.x86.xop.vpcomq" => "__builtin_ia32_vpcomq", + "llvm.x86.xop.vpcomub" => "__builtin_ia32_vpcomub", + "llvm.x86.xop.vpcomud" => "__builtin_ia32_vpcomud", + "llvm.x86.xop.vpcomuq" => "__builtin_ia32_vpcomuq", + "llvm.x86.xop.vpcomuw" => "__builtin_ia32_vpcomuw", + "llvm.x86.xop.vpcomw" => "__builtin_ia32_vpcomw", + "llvm.x86.xop.vpermil2pd" => "__builtin_ia32_vpermil2pd", + "llvm.x86.xop.vpermil2pd.256" => "__builtin_ia32_vpermil2pd256", + "llvm.x86.xop.vpermil2ps" => "__builtin_ia32_vpermil2ps", + "llvm.x86.xop.vpermil2ps.256" => "__builtin_ia32_vpermil2ps256", + "llvm.x86.xop.vphaddbd" => "__builtin_ia32_vphaddbd", + "llvm.x86.xop.vphaddbq" => "__builtin_ia32_vphaddbq", + "llvm.x86.xop.vphaddbw" => "__builtin_ia32_vphaddbw", + "llvm.x86.xop.vphadddq" => "__builtin_ia32_vphadddq", + "llvm.x86.xop.vphaddubd" => "__builtin_ia32_vphaddubd", + "llvm.x86.xop.vphaddubq" => "__builtin_ia32_vphaddubq", + "llvm.x86.xop.vphaddubw" => "__builtin_ia32_vphaddubw", + "llvm.x86.xop.vphaddudq" => "__builtin_ia32_vphaddudq", + "llvm.x86.xop.vphadduwd" => "__builtin_ia32_vphadduwd", + "llvm.x86.xop.vphadduwq" => "__builtin_ia32_vphadduwq", + "llvm.x86.xop.vphaddwd" => "__builtin_ia32_vphaddwd", + "llvm.x86.xop.vphaddwq" => "__builtin_ia32_vphaddwq", + "llvm.x86.xop.vphsubbw" => "__builtin_ia32_vphsubbw", + "llvm.x86.xop.vphsubdq" => "__builtin_ia32_vphsubdq", + "llvm.x86.xop.vphsubwd" => "__builtin_ia32_vphsubwd", + "llvm.x86.xop.vpmacsdd" => "__builtin_ia32_vpmacsdd", + "llvm.x86.xop.vpmacsdqh" => "__builtin_ia32_vpmacsdqh", + "llvm.x86.xop.vpmacsdql" => "__builtin_ia32_vpmacsdql", + "llvm.x86.xop.vpmacssdd" => "__builtin_ia32_vpmacssdd", + "llvm.x86.xop.vpmacssdqh" => 
"__builtin_ia32_vpmacssdqh", + "llvm.x86.xop.vpmacssdql" => "__builtin_ia32_vpmacssdql", + "llvm.x86.xop.vpmacsswd" => "__builtin_ia32_vpmacsswd", + "llvm.x86.xop.vpmacssww" => "__builtin_ia32_vpmacssww", + "llvm.x86.xop.vpmacswd" => "__builtin_ia32_vpmacswd", + "llvm.x86.xop.vpmacsww" => "__builtin_ia32_vpmacsww", + "llvm.x86.xop.vpmadcsswd" => "__builtin_ia32_vpmadcsswd", + "llvm.x86.xop.vpmadcswd" => "__builtin_ia32_vpmadcswd", + "llvm.x86.xop.vpperm" => "__builtin_ia32_vpperm", + "llvm.x86.xop.vprotb" => "__builtin_ia32_vprotb", + "llvm.x86.xop.vprotbi" => "__builtin_ia32_vprotbi", + "llvm.x86.xop.vprotd" => "__builtin_ia32_vprotd", + "llvm.x86.xop.vprotdi" => "__builtin_ia32_vprotdi", + "llvm.x86.xop.vprotq" => "__builtin_ia32_vprotq", + "llvm.x86.xop.vprotqi" => "__builtin_ia32_vprotqi", + "llvm.x86.xop.vprotw" => "__builtin_ia32_vprotw", + "llvm.x86.xop.vprotwi" => "__builtin_ia32_vprotwi", + "llvm.x86.xop.vpshab" => "__builtin_ia32_vpshab", + "llvm.x86.xop.vpshad" => "__builtin_ia32_vpshad", + "llvm.x86.xop.vpshaq" => "__builtin_ia32_vpshaq", + "llvm.x86.xop.vpshaw" => "__builtin_ia32_vpshaw", + "llvm.x86.xop.vpshlb" => "__builtin_ia32_vpshlb", + "llvm.x86.xop.vpshld" => "__builtin_ia32_vpshld", + "llvm.x86.xop.vpshlq" => "__builtin_ia32_vpshlq", + "llvm.x86.xop.vpshlw" => "__builtin_ia32_vpshlw", + "llvm.x86.xresldtrk" => "__builtin_ia32_xresldtrk", + "llvm.x86.xsusldtrk" => "__builtin_ia32_xsusldtrk", + "llvm.x86.xtest" => "__builtin_ia32_xtest", + // xcore + "llvm.xcore.bitrev" => "__builtin_bitrev", + "llvm.xcore.getid" => "__builtin_getid", + "llvm.xcore.getps" => "__builtin_getps", + "llvm.xcore.setps" => "__builtin_setps", + _ => unimplemented!("***** unsupported LLVM intrinsic {}", name), +} diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs new file mode 100644 index 00000000000..0edec566be3 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs @@ -0,0 +1,779 @@ +use std::borrow::Cow; + +use gccjit::{Function, FunctionPtrType, RValue, ToRValue, UnaryOp}; +use rustc_codegen_ssa::traits::BuilderMethods; + +use crate::{context::CodegenCx, builder::Builder}; + +pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str, original_function_name: Option<&String>) -> Cow<'b, [RValue<'gcc>]> { + // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing + // arguments here. + if gcc_func.get_param_count() != args.len() { + match &*func_name { + // NOTE: the following intrinsics have a different number of parameters in LLVM and GCC. 
+ "__builtin_ia32_prold512_mask" | "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask" + | "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask" + | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" + | "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask" + | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" + | "__builtin_ia32_prolq512_mask" | "__builtin_ia32_prorq512_mask" | "__builtin_ia32_pslldi512_mask" + | "__builtin_ia32_psrldi512_mask" | "__builtin_ia32_psllqi512_mask" | "__builtin_ia32_psrlqi512_mask" + | "__builtin_ia32_pslld512_mask" | "__builtin_ia32_psrld512_mask" | "__builtin_ia32_psllq512_mask" + | "__builtin_ia32_psrlq512_mask" | "__builtin_ia32_psrad512_mask" | "__builtin_ia32_psraq512_mask" + | "__builtin_ia32_psradi512_mask" | "__builtin_ia32_psraqi512_mask" | "__builtin_ia32_psrav16si_mask" + | "__builtin_ia32_psrav8di_mask" | "__builtin_ia32_prolvd512_mask" | "__builtin_ia32_prorvd512_mask" + | "__builtin_ia32_prolvq512_mask" | "__builtin_ia32_prorvq512_mask" | "__builtin_ia32_psllv16si_mask" + | "__builtin_ia32_psrlv16si_mask" | "__builtin_ia32_psllv8di_mask" | "__builtin_ia32_psrlv8di_mask" + | "__builtin_ia32_permvarsi512_mask" | "__builtin_ia32_vpermilvarps512_mask" + | "__builtin_ia32_vpermilvarpd512_mask" | "__builtin_ia32_permvardi512_mask" + | "__builtin_ia32_permvarsf512_mask" | "__builtin_ia32_permvarqi512_mask" + | "__builtin_ia32_permvarqi256_mask" | "__builtin_ia32_permvarqi128_mask" + | "__builtin_ia32_vpmultishiftqb512_mask" | "__builtin_ia32_vpmultishiftqb256_mask" + | "__builtin_ia32_vpmultishiftqb128_mask" + => { + let mut new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let first_arg = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(first_arg); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" | "__builtin_ia32_pminuq256_mask" + | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_prold256_mask" | "__builtin_ia32_prold128_mask" + | "__builtin_ia32_prord512_mask" | "__builtin_ia32_prord256_mask" | "__builtin_ia32_prord128_mask" + | "__builtin_ia32_prolq256_mask" | "__builtin_ia32_prolq128_mask" | "__builtin_ia32_prorq256_mask" + | "__builtin_ia32_prorq128_mask" | "__builtin_ia32_psraq256_mask" | "__builtin_ia32_psraq128_mask" + | "__builtin_ia32_psraqi256_mask" | "__builtin_ia32_psraqi128_mask" | "__builtin_ia32_psravq256_mask" + | "__builtin_ia32_psravq128_mask" | "__builtin_ia32_prolvd256_mask" | "__builtin_ia32_prolvd128_mask" + | "__builtin_ia32_prorvd256_mask" | "__builtin_ia32_prorvd128_mask" | "__builtin_ia32_prolvq256_mask" + | "__builtin_ia32_prolvq128_mask" | "__builtin_ia32_prorvq256_mask" | "__builtin_ia32_prorvq128_mask" + | "__builtin_ia32_permvardi256_mask" | "__builtin_ia32_permvardf512_mask" | "__builtin_ia32_permvardf256_mask" + | "__builtin_ia32_pmulhuw512_mask" | "__builtin_ia32_pmulhw512_mask" | "__builtin_ia32_pmulhrsw512_mask" + | "__builtin_ia32_pmaxuw512_mask" | "__builtin_ia32_pmaxub512_mask" | "__builtin_ia32_pmaxsw512_mask" + | "__builtin_ia32_pmaxsb512_mask" | "__builtin_ia32_pminuw512_mask" | "__builtin_ia32_pminub512_mask" + | 
"__builtin_ia32_pminsw512_mask" | "__builtin_ia32_pminsb512_mask" + | "__builtin_ia32_pmaddwd512_mask" | "__builtin_ia32_pmaddubsw512_mask" | "__builtin_ia32_packssdw512_mask" + | "__builtin_ia32_packsswb512_mask" | "__builtin_ia32_packusdw512_mask" | "__builtin_ia32_packuswb512_mask" + | "__builtin_ia32_pavgw512_mask" | "__builtin_ia32_pavgb512_mask" | "__builtin_ia32_psllw512_mask" + | "__builtin_ia32_psllwi512_mask" | "__builtin_ia32_psllv32hi_mask" | "__builtin_ia32_psrlw512_mask" + | "__builtin_ia32_psrlwi512_mask" | "__builtin_ia32_psllv16hi_mask" | "__builtin_ia32_psllv8hi_mask" + | "__builtin_ia32_psrlv32hi_mask" | "__builtin_ia32_psraw512_mask" | "__builtin_ia32_psrawi512_mask" + | "__builtin_ia32_psrlv16hi_mask" | "__builtin_ia32_psrlv8hi_mask" | "__builtin_ia32_psrav32hi_mask" + | "__builtin_ia32_permvarhi512_mask" | "__builtin_ia32_pshufb512_mask" | "__builtin_ia32_psrav16hi_mask" + | "__builtin_ia32_psrav8hi_mask" | "__builtin_ia32_permvarhi256_mask" | "__builtin_ia32_permvarhi128_mask" + => { + let mut new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let vector_type = arg3_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_dbpsadbw512_mask" | "__builtin_ia32_dbpsadbw256_mask" | "__builtin_ia32_dbpsadbw128_mask" => { + let mut new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let vector_type = arg4_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg5_type = gcc_func.get_param_type(4); + let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask" + | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => { + let mut new_args = args.to_vec(); + // Remove last arg as it doesn't seem to be used in GCC and is always false. 
+ new_args.pop(); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vpconflictsi_512_mask" | "__builtin_ia32_vpconflictsi_256_mask" + | "__builtin_ia32_vpconflictsi_128_mask" | "__builtin_ia32_vpconflictdi_512_mask" + | "__builtin_ia32_vpconflictdi_256_mask" | "__builtin_ia32_vpconflictdi_128_mask" => { + let mut new_args = args.to_vec(); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask" + | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask" + | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => { + let mut new_args = args.to_vec(); + let arg5_type = gcc_func.get_param_type(4); + let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { + let mut new_args = args.to_vec(); + + let mut last_arg = None; + if args.len() == 4 { + last_arg = new_args.pop(); + } + + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + + if args.len() == 3 { + // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 map to + // the same GCC intrinsic, but the former has 3 parameters while the + // latter has 4, so only the former requires this additional rounding argument.
+ let arg5_type = gcc_func.get_param_type(4); + new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4)); + } + + if let Some(last_arg) = last_arg { + new_args.push(last_arg); + } + + args = new_args.into(); + }, + "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" + | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" + | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" + | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" + | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" + | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg3_type = gcc_func.get_param_type(2); + let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(undefined); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_vpermi2vard512_mask" | "__builtin_ia32_vpermi2vard256_mask" + | "__builtin_ia32_vpermi2vard128_mask" | "__builtin_ia32_vpermi2varq512_mask" + | "__builtin_ia32_vpermi2varq256_mask" | "__builtin_ia32_vpermi2varq128_mask" + | "__builtin_ia32_vpermi2varps512_mask" | "__builtin_ia32_vpermi2varps256_mask" + | "__builtin_ia32_vpermi2varps128_mask" | "__builtin_ia32_vpermi2varpd512_mask" + | "__builtin_ia32_vpermi2varpd256_mask" | "__builtin_ia32_vpermi2varpd128_mask" | "__builtin_ia32_vpmadd52huq512_mask" + | "__builtin_ia32_vpmadd52luq512_mask" | "__builtin_ia32_vpmadd52huq256_mask" | "__builtin_ia32_vpmadd52luq256_mask" + | "__builtin_ia32_vpmadd52huq128_mask" + => { + let mut new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" + | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg2_type = gcc_func.get_param_type(1); + let undefined = builder.current_func().new_local(None, arg2_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(undefined); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_stmxcsr" => { + args = vec![].into(); + }, + "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => { + let mut new_args = args.to_vec(); + let arg2_type = gcc_func.get_param_type(1); + let variable = builder.current_func().new_local(None, arg2_type, "addcarryResult"); + new_args.push(variable.get_address(None)); + args = new_args.into(); + }, + "__builtin_ia32_vpermt2varqi512_mask" | "__builtin_ia32_vpermt2varqi256_mask" + | "__builtin_ia32_vpermt2varqi128_mask" | "__builtin_ia32_vpermt2varhi512_mask" + | "__builtin_ia32_vpermt2varhi256_mask" | "__builtin_ia32_vpermt2varhi128_mask" + => { + let new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + args = vec![new_args[1], new_args[0], new_args[2], minus_one].into(); + }, + "__builtin_ia32_xrstor" | "__builtin_ia32_xsavec" => { + let new_args = args.to_vec(); + let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32); + let arg2 = new_args[1] << thirty_two | new_args[2]; + let arg2_type = gcc_func.get_param_type(1); + let arg2 = builder.context.new_cast(None, arg2, arg2_type); + args = vec![new_args[0], arg2].into(); + }, + "__builtin_prefetch" => { + let mut new_args = args.to_vec(); + new_args.pop(); + args = new_args.into(); + }, + _ => (), + } + } + else { + match &*func_name { + "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + let new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let arg3 = builder.context.new_cast(None, new_args[4], arg3_type); + let arg4_type = gcc_func.get_param_type(3); + let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type); + args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into(); + }, + // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + // FIXME: intrinsics like _mm_mask_fmadd_sd should probably call the GCC + // intrinsic directly to avoid this. + "__builtin_ia32_vfmaddss3_round" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 4]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 4]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 4]); + args = vec![a, b, c, new_args[3]].into(); + }, + "__builtin_ia32_vfmaddsd3_round" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 2]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 2]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]); + args = vec![a, b, c, new_args[3]].into(); + }, + "__builtin_ia32_vfmaddsubpd256" | "__builtin_ia32_vfmaddsubps" | "__builtin_ia32_vfmaddsubps256" + | "__builtin_ia32_vfmaddsubpd" => { + if let Some(original_function_name) = original_function_name { + match &**original_function_name { + "llvm.x86.fma.vfmsubadd.pd.256" | "llvm.x86.fma.vfmsubadd.ps" | "llvm.x86.fma.vfmsubadd.ps.256" + | "llvm.x86.fma.vfmsubadd.pd" => { + // NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps map to + // __builtin_ia32_vfmaddsubps, only add the minus if this call comes from a + // subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd. + let mut new_args = args.to_vec(); + let arg3 = &mut new_args[2]; + *arg3 = builder.context.new_unary_op(None, UnaryOp::Minus, arg3.get_type(), *arg3); + args = new_args.into(); + }, + _ => (), + } + } + }, + "__builtin_ia32_ldmxcsr" => { + // The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer, + // so dereference the pointer. + let mut new_args = args.to_vec(); + let uint_ptr_type = builder.uint_type.make_pointer(); + let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type); + new_args[0] = arg1.dereference(None).to_rvalue(); + args = new_args.into(); + }, + "__builtin_ia32_rcp14sd_mask" | "__builtin_ia32_rcp14ss_mask" | "__builtin_ia32_rsqrt14sd_mask" + | "__builtin_ia32_rsqrt14ss_mask" => { + let new_args = args.to_vec(); + args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into(); + }, + "__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => { + let new_args = args.to_vec(); + args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into(); + }, + _ => (), + } + } + + args +}
+                let mut new_args = args.to_vec();
+                let uint_ptr_type = builder.uint_type.make_pointer();
+                let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type);
+                new_args[0] = arg1.dereference(None).to_rvalue();
+                args = new_args.into();
+            },
+            "__builtin_ia32_rcp14sd_mask" | "__builtin_ia32_rcp14ss_mask" | "__builtin_ia32_rsqrt14sd_mask"
+                | "__builtin_ia32_rsqrt14ss_mask" => {
+                let new_args = args.to_vec();
+                args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into();
+            },
+            "__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => {
+                let new_args = args.to_vec();
+                args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into();
+            },
+            _ => (),
+        }
+    }
+
+    args
+}
+
+pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, mut return_value: RValue<'gcc>, func_name: &str, args: &[RValue<'gcc>], args_adjusted: bool, orig_args: &[RValue<'gcc>]) -> RValue<'gcc> {
+    match func_name {
+        "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => {
+            #[cfg(feature="master")]
+            {
+                let zero = builder.context.new_rvalue_zero(builder.int_type);
+                return_value = builder.context.new_vector_access(None, return_value, zero).to_rvalue();
+            }
+        },
+        "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => {
+            // Both llvm.x86.addcarry.32 and llvm.x86.addcarryx.u32 point to the same GCC builtin,
+            // but only the former requires adjusting the return value.
+            // The two LLVM intrinsics differ in their argument count, which is why we check
+            // whether the arguments were adjusted.
+            if args_adjusted {
+                let last_arg = args.last().expect("last arg");
+                let field1 = builder.context.new_field(None, builder.u8_type, "carryFlag");
+                let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult");
+                let struct_type = builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]);
+                return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[return_value, last_arg.dereference(None).to_rvalue()]);
+            }
+        },
+        "__builtin_ia32_stmxcsr" => {
+            // The builtin __builtin_ia32_stmxcsr returns a value while llvm.x86.sse.stmxcsr writes
+            // the result to its pointer argument.
+            // Since __builtin_ia32_stmxcsr takes no arguments, the argument was removed earlier,
+            // so recover the original argument to obtain the pointer the result must be written to.
+            let uint_ptr_type = builder.uint_type.make_pointer();
+            let ptr = builder.context.new_cast(None, orig_args[0], uint_ptr_type);
+            builder.llbb().add_assignment(None, ptr.dereference(None), return_value);
+            // The return value was assigned to the result pointer above. To avoid calling the
+            // builtin twice, we overwrite the return value with a dummy value.
+            return_value = builder.context.new_rvalue_zero(builder.int_type);
+        },
+        _ => (),
+    }
+
+    return_value
+}
+
+pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
+    // FIXME(antoyo): find a way to refactor in order to avoid this hack.
+    match func_name {
+        // NOTE: these intrinsics have missing parameters before the last one, so ignore the
+        // last argument type check.
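+        // For example, llvm.x86.avx512.add.ps.512 passes (a, b, rounding) while
+        // __builtin_ia32_addps512_mask expects (a, b, src, mask, rounding):
+        // adjust_intrinsic_arguments() inserts the missing src and mask operands,
+        // so the trailing rounding argument would otherwise be type-checked
+        // against the wrong parameter.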
+ "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" + | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask" + | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" + | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" + | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" + | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" + | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" + | "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" => { + if index == args_len - 1 { + return true; + } + }, + "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + if index == 2 || index == 3 { + return true; + } + }, + "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { + // Since there are two LLVM intrinsics that map to each of these GCC builtins and only + // one of them has a missing parameter before the last one, we check the number of + // arguments to distinguish those cases. + if args_len == 4 && index == args_len - 1 { + return true; + } + }, + // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => return true, + "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask" + | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => { + if index == args_len - 1 { + return true; + } + }, + _ => (), + } + + false +} + +#[cfg(not(feature="master"))] +pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { + match name { + "llvm.x86.xgetbv" | "llvm.x86.sse2.pause" => { + let gcc_name = "__builtin_trap"; + let func = cx.context.get_builtin_function(gcc_name); + cx.functions.borrow_mut().insert(gcc_name.to_string(), func); + return func; + }, + _ => unimplemented!("unsupported LLVM intrinsic {}", name), + } +} + +#[cfg(feature="master")] +pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { + match name { + "llvm.prefetch" => { + let gcc_name = "__builtin_prefetch"; + let func = cx.context.get_builtin_function(gcc_name); + cx.functions.borrow_mut().insert(gcc_name.to_string(), func); + return func + }, + _ => (), + } + + let gcc_name = match name { + "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv", + // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html + "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd", + "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask", + "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask", + "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask", + "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512_mask", + "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512_mask", + "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512_mask", + "llvm.fma.v16f32" => "__builtin_ia32_vfmaddps512_mask", + "llvm.fma.v8f64" => "__builtin_ia32_vfmaddpd512_mask", + "llvm.x86.avx512.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask", + "llvm.x86.avx512.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask", + "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask", + "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask", + 
"llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask", + "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask", + "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask", + "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask", + "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512_mask", + "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512_mask", + "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512_mask", + "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512_mask", + "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512_mask", + "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512_mask", + "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512_mask", + "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512_mask", + "llvm.x86.avx512.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask", + "llvm.x86.avx512.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask", + "llvm.x86.avx512.sitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtdq2ps512_mask", + "llvm.x86.avx512.uitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtudq2ps512_mask", + "llvm.x86.avx512.mask.ucmp.d.512" => "__builtin_ia32_ucmpd512_mask", + "llvm.x86.avx512.mask.ucmp.d.256" => "__builtin_ia32_ucmpd256_mask", + "llvm.x86.avx512.mask.ucmp.d.128" => "__builtin_ia32_ucmpd128_mask", + "llvm.x86.avx512.mask.cmp.d.512" => "__builtin_ia32_cmpd512_mask", + "llvm.x86.avx512.mask.cmp.d.256" => "__builtin_ia32_cmpd256_mask", + "llvm.x86.avx512.mask.cmp.d.128" => "__builtin_ia32_cmpd128_mask", + "llvm.x86.avx512.mask.ucmp.q.512" => "__builtin_ia32_ucmpq512_mask", + "llvm.x86.avx512.mask.ucmp.q.256" => "__builtin_ia32_ucmpq256_mask", + "llvm.x86.avx512.mask.ucmp.q.128" => "__builtin_ia32_ucmpq128_mask", + "llvm.x86.avx512.mask.cmp.q.512" => "__builtin_ia32_cmpq512_mask", + "llvm.x86.avx512.mask.cmp.q.256" => "__builtin_ia32_cmpq256_mask", + "llvm.x86.avx512.mask.cmp.q.128" => "__builtin_ia32_cmpq128_mask", + "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_mask_round", + "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_mask_round", + "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_mask_round", + "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_mask_round", + "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_mask_round", + "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_mask_round", + "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss_mask_round", + "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd_mask_round", + "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_mask_round", + "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_mask_round", + "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_mask_round", + "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_mask_round", + "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_mask_round", + "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_mask_round", + "llvm.x86.avx512.vfmadd.f32" => "__builtin_ia32_vfmaddss3_round", + "llvm.x86.avx512.vfmadd.f64" => "__builtin_ia32_vfmaddsd3_round", + "llvm.ceil.v4f64" => "__builtin_ia32_ceilpd256", + "llvm.ceil.v8f32" => "__builtin_ia32_ceilps256", + "llvm.floor.v4f64" => "__builtin_ia32_floorpd256", + "llvm.floor.v8f32" => "__builtin_ia32_floorps256", + "llvm.sqrt.v4f64" => "__builtin_ia32_sqrtpd256", + "llvm.x86.sse.stmxcsr" => "__builtin_ia32_stmxcsr", + "llvm.x86.sse.ldmxcsr" => "__builtin_ia32_ldmxcsr", + 
"llvm.ctpop.v16i32" => "__builtin_ia32_vpopcountd_v16si", + "llvm.ctpop.v8i32" => "__builtin_ia32_vpopcountd_v8si", + "llvm.ctpop.v4i32" => "__builtin_ia32_vpopcountd_v4si", + "llvm.ctpop.v8i64" => "__builtin_ia32_vpopcountq_v8di", + "llvm.ctpop.v4i64" => "__builtin_ia32_vpopcountq_v4di", + "llvm.ctpop.v2i64" => "__builtin_ia32_vpopcountq_v2di", + "llvm.x86.addcarry.64" => "__builtin_ia32_addcarryx_u64", + "llvm.x86.subborrow.64" => "__builtin_ia32_sbb_u64", + "llvm.floor.v2f64" => "__builtin_ia32_floorpd", + "llvm.floor.v4f32" => "__builtin_ia32_floorps", + "llvm.ceil.v2f64" => "__builtin_ia32_ceilpd", + "llvm.ceil.v4f32" => "__builtin_ia32_ceilps", + "llvm.fma.v2f64" => "__builtin_ia32_vfmaddpd", + "llvm.fma.v4f64" => "__builtin_ia32_vfmaddpd256", + "llvm.fma.v4f32" => "__builtin_ia32_vfmaddps", + "llvm.fma.v8f32" => "__builtin_ia32_vfmaddps256", + "llvm.ctlz.v16i32" => "__builtin_ia32_vplzcntd_512_mask", + "llvm.ctlz.v8i32" => "__builtin_ia32_vplzcntd_256_mask", + "llvm.ctlz.v4i32" => "__builtin_ia32_vplzcntd_128_mask", + "llvm.ctlz.v8i64" => "__builtin_ia32_vplzcntq_512_mask", + "llvm.ctlz.v4i64" => "__builtin_ia32_vplzcntq_256_mask", + "llvm.ctlz.v2i64" => "__builtin_ia32_vplzcntq_128_mask", + "llvm.ctpop.v32i16" => "__builtin_ia32_vpopcountw_v32hi", + "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd3", + "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss3", + "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmaddsubpd", + "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmaddsubpd256", + "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmaddsubps", + "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmaddsubps256", + "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd3", + "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss3", + "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd3", + "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss3", + "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask", + "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask", + "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask", + "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask", + "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask", + "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask", + "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask", + "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask", + "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask", + "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask", + "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask", + "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask", + "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask", + "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask", + "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask", + "llvm.ctpop.v16i16" => "__builtin_ia32_vpopcountw_v16hi", + "llvm.ctpop.v8i16" => "__builtin_ia32_vpopcountw_v8hi", + "llvm.ctpop.v64i8" => "__builtin_ia32_vpopcountb_v64qi", + "llvm.ctpop.v32i8" => "__builtin_ia32_vpopcountb_v32qi", + "llvm.ctpop.v16i8" => "__builtin_ia32_vpopcountb_v16qi", + "llvm.x86.avx512.mask.vpshufbitqmb.512" => "__builtin_ia32_vpshufbitqmb512_mask", + "llvm.x86.avx512.mask.vpshufbitqmb.256" => "__builtin_ia32_vpshufbitqmb256_mask", + 
"llvm.x86.avx512.mask.vpshufbitqmb.128" => "__builtin_ia32_vpshufbitqmb128_mask", + "llvm.x86.avx512.mask.ucmp.w.512" => "__builtin_ia32_ucmpw512_mask", + "llvm.x86.avx512.mask.ucmp.w.256" => "__builtin_ia32_ucmpw256_mask", + "llvm.x86.avx512.mask.ucmp.w.128" => "__builtin_ia32_ucmpw128_mask", + "llvm.x86.avx512.mask.ucmp.b.512" => "__builtin_ia32_ucmpb512_mask", + "llvm.x86.avx512.mask.ucmp.b.256" => "__builtin_ia32_ucmpb256_mask", + "llvm.x86.avx512.mask.ucmp.b.128" => "__builtin_ia32_ucmpb128_mask", + "llvm.x86.avx512.mask.cmp.w.512" => "__builtin_ia32_cmpw512_mask", + "llvm.x86.avx512.mask.cmp.w.256" => "__builtin_ia32_cmpw256_mask", + "llvm.x86.avx512.mask.cmp.w.128" => "__builtin_ia32_cmpw128_mask", + "llvm.x86.avx512.mask.cmp.b.512" => "__builtin_ia32_cmpb512_mask", + "llvm.x86.avx512.mask.cmp.b.256" => "__builtin_ia32_cmpb256_mask", + "llvm.x86.avx512.mask.cmp.b.128" => "__builtin_ia32_cmpb128_mask", + "llvm.x86.xrstor" => "__builtin_ia32_xrstor", + "llvm.x86.xsavec" => "__builtin_ia32_xsavec", + "llvm.x86.addcarry.32" => "__builtin_ia32_addcarryx_u32", + "llvm.x86.subborrow.32" => "__builtin_ia32_sbb_u32", + "llvm.x86.avx512.mask.compress.store.w.512" => "__builtin_ia32_compressstoreuhi512_mask", + "llvm.x86.avx512.mask.compress.store.w.256" => "__builtin_ia32_compressstoreuhi256_mask", + "llvm.x86.avx512.mask.compress.store.w.128" => "__builtin_ia32_compressstoreuhi128_mask", + "llvm.x86.avx512.mask.compress.store.b.512" => "__builtin_ia32_compressstoreuqi512_mask", + "llvm.x86.avx512.mask.compress.store.b.256" => "__builtin_ia32_compressstoreuqi256_mask", + "llvm.x86.avx512.mask.compress.store.b.128" => "__builtin_ia32_compressstoreuqi128_mask", + "llvm.x86.avx512.mask.compress.w.512" => "__builtin_ia32_compresshi512_mask", + "llvm.x86.avx512.mask.compress.w.256" => "__builtin_ia32_compresshi256_mask", + "llvm.x86.avx512.mask.compress.w.128" => "__builtin_ia32_compresshi128_mask", + "llvm.x86.avx512.mask.compress.b.512" => "__builtin_ia32_compressqi512_mask", + "llvm.x86.avx512.mask.compress.b.256" => "__builtin_ia32_compressqi256_mask", + "llvm.x86.avx512.mask.compress.b.128" => "__builtin_ia32_compressqi128_mask", + "llvm.x86.avx512.mask.expand.w.512" => "__builtin_ia32_expandhi512_mask", + "llvm.x86.avx512.mask.expand.w.256" => "__builtin_ia32_expandhi256_mask", + "llvm.x86.avx512.mask.expand.w.128" => "__builtin_ia32_expandhi128_mask", + "llvm.x86.avx512.mask.expand.b.512" => "__builtin_ia32_expandqi512_mask", + "llvm.x86.avx512.mask.expand.b.256" => "__builtin_ia32_expandqi256_mask", + "llvm.x86.avx512.mask.expand.b.128" => "__builtin_ia32_expandqi128_mask", + "llvm.fshl.v8i64" => "__builtin_ia32_vpshldv_v8di", + "llvm.fshl.v4i64" => "__builtin_ia32_vpshldv_v4di", + "llvm.fshl.v2i64" => "__builtin_ia32_vpshldv_v2di", + "llvm.fshl.v16i32" => "__builtin_ia32_vpshldv_v16si", + "llvm.fshl.v8i32" => "__builtin_ia32_vpshldv_v8si", + "llvm.fshl.v4i32" => "__builtin_ia32_vpshldv_v4si", + "llvm.fshl.v32i16" => "__builtin_ia32_vpshldv_v32hi", + "llvm.fshl.v16i16" => "__builtin_ia32_vpshldv_v16hi", + "llvm.fshl.v8i16" => "__builtin_ia32_vpshldv_v8hi", + "llvm.fshr.v8i64" => "__builtin_ia32_vpshrdv_v8di", + "llvm.fshr.v4i64" => "__builtin_ia32_vpshrdv_v4di", + "llvm.fshr.v2i64" => "__builtin_ia32_vpshrdv_v2di", + "llvm.fshr.v16i32" => "__builtin_ia32_vpshrdv_v16si", + "llvm.fshr.v8i32" => "__builtin_ia32_vpshrdv_v8si", + "llvm.fshr.v4i32" => "__builtin_ia32_vpshrdv_v4si", + "llvm.fshr.v32i16" => "__builtin_ia32_vpshrdv_v32hi", + "llvm.fshr.v16i16" => "__builtin_ia32_vpshrdv_v16hi", + 
"llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi", + "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd3", + "llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss3", + + // The above doc points to unknown builtins for the following, so override them: + "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si", + "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gathersiv8si", + "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gathersiv4sf", + "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gathersiv8sf", + "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gathersiv2di", + "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gathersiv4di", + "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gathersiv2df", + "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gathersiv4df", + "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherdiv4si", + "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherdiv4si256", + "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherdiv4sf", + "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherdiv4sf256", + "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherdiv2di", + "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherdiv4di", + "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherdiv2df", + "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherdiv4df", + "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512_mask", + "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512_mask", + "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512_mask", + "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512_mask", + "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512_mask", + "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512_mask", + "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512_mask", + "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512_mask", + "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512_mask", + "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512_mask", + "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256_mask", + "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128_mask", + "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512_mask", + "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512_mask", + "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256_mask", + "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128_mask", + "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si_mask", + "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di_mask", + "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256_mask", + "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128_mask", + "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si_mask", + "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si_mask", + "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di_mask", + "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di_mask", + "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512_mask", + "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask", + "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask", + "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512_mask", + "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256_mask", + "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask", + "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512_mask", + "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256_mask", + 
"llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask", + "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask", + "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask", + "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask", + "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask", + "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask", + "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask", + "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask", + "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask", + "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask", + "llvm.x86.avx512.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256_mask", + "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask", + "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_mask_round", + "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_mask_round", + "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_mask_round", + "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_mask_round", + "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_mask_round", + "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_mask_round", + "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_mask_round", + "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_mask_round", + "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_mask_round", + "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_mask_round", + "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_mask_round", + "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_mask_round", + "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask_round", + "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask_round", + "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask_round", + "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask_round", + "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_mask_round", + "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_mask_round", + "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_mask_round", + "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_mask_round", + "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_mask_round", + "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_mask_round", + "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_mask_round", + "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_mask_round", + "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_mask_round", + "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_mask_round", + "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_vaesenc_v32qi", + "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_vaesenclast_v32qi", + "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_vaesdec_v32qi", + "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_vaesdeclast_v32qi", + "llvm.x86.aesni.aesenc.512" => 
"__builtin_ia32_vaesenc_v64qi", + "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_vaesenclast_v64qi", + "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_vaesdec_v64qi", + "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_vaesdeclast_v64qi", + "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_v8bf", + "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_v16bf", + "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_v32bf", + "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_v8sf", + "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_v16sf", + "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_v4sf", + "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_v8sf", + "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_v16sf", + "llvm.x86.pclmulqdq.512" => "__builtin_ia32_vpclmulqdq_v8di", + "llvm.x86.pclmulqdq.256" => "__builtin_ia32_vpclmulqdq_v4di", + "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask", + "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask", + "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask", + "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask", + "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask", + "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512_mask", + "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512_mask", + "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512_mask", + "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512_mask", + "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512_mask", + "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512_mask", + "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512_mask", + "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512_mask", + "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi_mask", + "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi_mask", + "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi_mask", + "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512_mask", + "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512_mask", + "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi_mask", + "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi_mask", + "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi_mask", + "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512_mask", + "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512_mask", + "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi_mask", + "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi_mask", + "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi_mask", + "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask", + "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask", + "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask", + "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask", + "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask", + "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask", + "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512_mask", + "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask", + "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask", + "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask", + 
"llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask", + "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask", + "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask", + "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask", + "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask", + "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd_v16si", + "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd_v8si", + "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd_v4si", + "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds_v16si", + "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds_v8si", + "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds_v4si", + "llvm.x86.avx512.vpdpbusd.512" => "__builtin_ia32_vpdpbusd_v16si", + "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd_v8si", + "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd_v4si", + "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds_v16si", + "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds_v8si", + "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds_v4si", + + // NOTE: this file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py + _ => include!("archs.rs"), + }; + + let func = cx.context.get_target_builtin_function(gcc_name); + cx.functions.borrow_mut().insert(gcc_name.to_string(), func); + func +} diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs new file mode 100644 index 00000000000..2590e0e3af4 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -0,0 +1,1283 @@ +pub mod llvm; +mod simd; + +#[cfg(feature="master")] +use std::iter; + +use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType}; +use rustc_codegen_ssa::MemFlags; +use rustc_codegen_ssa::base::wants_msvc_seh; +use rustc_codegen_ssa::common::IntPredicate; +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; +#[cfg(feature="master")] +use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods}; +use rustc_middle::bug; +use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::ty::layout::LayoutOf; +#[cfg(feature="master")] +use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; +use rustc_span::{Span, Symbol, symbol::kw, sym}; +use rustc_target::abi::HasDataLayout; +use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; +use rustc_target::spec::PanicStrategy; +#[cfg(feature="master")] +use rustc_target::spec::abi::Abi; + +use crate::abi::GccType; +#[cfg(feature="master")] +use crate::abi::FnAbiGccExt; +use crate::builder::Builder; +use crate::common::{SignType, TypeReflection}; +use crate::context::CodegenCx; +use crate::errors::InvalidMonomorphizationBasicInteger; +use crate::type_of::LayoutGccExt; +use crate::intrinsic::simd::generic_simd_intrinsic; + +fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> { + let gcc_name = match name { + sym::sqrtf32 => "sqrtf", + sym::sqrtf64 => "sqrt", + sym::powif32 => "__builtin_powif", + sym::powif64 => "__builtin_powi", + sym::sinf32 => "sinf", + sym::sinf64 => "sin", + sym::cosf32 => "cosf", + sym::cosf64 => "cos", + sym::powf32 => "powf", + sym::powf64 => "pow", + 
sym::expf32 => "expf",
+        sym::expf64 => "exp",
+        sym::exp2f32 => "exp2f",
+        sym::exp2f64 => "exp2",
+        sym::logf32 => "logf",
+        sym::logf64 => "log",
+        sym::log10f32 => "log10f",
+        sym::log10f64 => "log10",
+        sym::log2f32 => "log2f",
+        sym::log2f64 => "log2",
+        sym::fmaf32 => "fmaf",
+        sym::fmaf64 => "fma",
+        sym::fabsf32 => "fabsf",
+        sym::fabsf64 => "fabs",
+        sym::minnumf32 => "fminf",
+        sym::minnumf64 => "fmin",
+        sym::maxnumf32 => "fmaxf",
+        sym::maxnumf64 => "fmax",
+        sym::copysignf32 => "copysignf",
+        sym::copysignf64 => "copysign",
+        sym::floorf32 => "floorf",
+        sym::floorf64 => "floor",
+        sym::ceilf32 => "ceilf",
+        sym::ceilf64 => "ceil",
+        sym::truncf32 => "truncf",
+        sym::truncf64 => "trunc",
+        sym::rintf32 => "rintf",
+        sym::rintf64 => "rint",
+        sym::nearbyintf32 => "nearbyintf",
+        sym::nearbyintf64 => "nearbyint",
+        sym::roundf32 => "roundf",
+        sym::roundf64 => "round",
+        sym::abort => "abort",
+        _ => return None,
+    };
+    Some(cx.context.get_builtin_function(&gcc_name))
+}
+
+impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+        let tcx = self.tcx;
+        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+        let (def_id, substs) = match *callee_ty.kind() {
+            ty::FnDef(def_id, substs) => (def_id, substs),
+            _ => bug!("expected fn item type, found {}", callee_ty),
+        };
+
+        let sig = callee_ty.fn_sig(tcx);
+        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+        let arg_tys = sig.inputs();
+        let ret_ty = sig.output();
+        let name = tcx.item_name(def_id);
+        let name_str = name.as_str();
+
+        let llret_ty = self.layout_of(ret_ty).gcc_type(self);
+        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+        let simple = get_simple_intrinsic(self, name);
+        let llval =
+            match name {
+                _ if simple.is_some() => {
+                    // FIXME(antoyo): remove this cast when the API supports functions.
+                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+                    self.call(self.type_void(), None, func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+                },
+                sym::likely => {
+                    self.expect(args[0].immediate(), true)
+                }
+                sym::unlikely => {
+                    self.expect(args[0].immediate(), false)
+                }
+                kw::Try => {
+                    try_intrinsic(
+                        self,
+                        args[0].immediate(),
+                        args[1].immediate(),
+                        args[2].immediate(),
+                        llresult,
+                    );
+                    return;
+                }
+                sym::breakpoint => {
+                    unimplemented!();
+                }
+                sym::va_copy => {
+                    unimplemented!();
+                }
+                sym::va_arg => {
+                    unimplemented!();
+                }
+
+                sym::volatile_load | sym::unaligned_volatile_load => {
+                    let tp_ty = substs.type_at(0);
+                    let mut ptr = args[0].immediate();
+                    if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                        ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
+                    }
+                    let load = self.volatile_load(ptr.get_type(), ptr);
+                    // TODO(antoyo): set alignment.
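+                    // Convert the loaded value into the immediate representation
+                    // expected for its layout before handing it to the caller.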
+                    self.to_immediate(load, self.layout_of(tp_ty))
+                }
+                sym::volatile_store => {
+                    let dst = args[0].deref(self.cx());
+                    args[1].val.volatile_store(self, dst);
+                    return;
+                }
+                sym::unaligned_volatile_store => {
+                    let dst = args[0].deref(self.cx());
+                    args[1].val.unaligned_volatile_store(self, dst);
+                    return;
+                }
+                sym::prefetch_read_data
+                    | sym::prefetch_write_data
+                    | sym::prefetch_read_instruction
+                    | sym::prefetch_write_instruction => {
+                    unimplemented!();
+                }
+                sym::ctlz
+                    | sym::ctlz_nonzero
+                    | sym::cttz
+                    | sym::cttz_nonzero
+                    | sym::ctpop
+                    | sym::bswap
+                    | sym::bitreverse
+                    | sym::rotate_left
+                    | sym::rotate_right
+                    | sym::saturating_add
+                    | sym::saturating_sub => {
+                    let ty = arg_tys[0];
+                    match int_type_width_signed(ty, self) {
+                        Some((width, signed)) => match name {
+                            sym::ctlz | sym::cttz => {
+                                let func = self.current_func.borrow().expect("func");
+                                let then_block = func.new_block("then");
+                                let else_block = func.new_block("else");
+                                let after_block = func.new_block("after");
+
+                                let arg = args[0].immediate();
+                                let result = func.new_local(None, arg.get_type(), "zeros");
+                                let zero = self.cx.gcc_zero(arg.get_type());
+                                let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
+                                self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+                                let zero_result = self.cx.gcc_uint(arg.get_type(), width);
+                                then_block.add_assignment(None, result, zero_result);
+                                then_block.end_with_jump(None, after_block);
+
+                                // NOTE: since jumps were added in a place
+                                // count_leading_zeroes() does not expect, the current block
+                                // in the state needs to be updated.
+                                self.switch_to_block(else_block);
+
+                                let zeros =
+                                    match name {
+                                        sym::ctlz => self.count_leading_zeroes(width, arg),
+                                        sym::cttz => self.count_trailing_zeroes(width, arg),
+                                        _ => unreachable!(),
+                                    };
+                                self.llbb().add_assignment(None, result, zeros);
+                                self.llbb().end_with_jump(None, after_block);
+
+                                // NOTE: since jumps were added in a place rustc does not
+                                // expect, the current block in the state needs to be updated.
+                                self.switch_to_block(after_block);
+
+                                result.to_rvalue()
+                            }
+                            sym::ctlz_nonzero => {
+                                self.count_leading_zeroes(width, args[0].immediate())
+                            },
+                            sym::cttz_nonzero => {
+                                self.count_trailing_zeroes(width, args[0].immediate())
+                            }
+                            sym::ctpop => self.pop_count(args[0].immediate()),
+                            sym::bswap => {
+                                if width == 8 {
+                                    args[0].immediate() // byte-swapping a u8/i8 is a no-op
+                                }
+                                else {
+                                    self.gcc_bswap(args[0].immediate(), width)
+                                }
+                            },
+                            sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+                            sym::rotate_left | sym::rotate_right => {
+                                // TODO(antoyo): implement using algorithm from:
+                                // https://blog.regehr.org/archives/1063
+                                // for other platforms.
+                                let is_left = name == sym::rotate_left;
+                                let val = args[0].immediate();
+                                let raw_shift = args[1].immediate();
+                                if is_left {
+                                    self.rotate_left(val, raw_shift, width)
+                                }
+                                else {
+                                    self.rotate_right(val, raw_shift, width)
+                                }
+                            },
+                            sym::saturating_add => {
+                                self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
+                            },
+                            sym::saturating_sub => {
+                                self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
+                            },
+                            _ => bug!(),
+                        },
+                        None => {
+                            tcx.sess.emit_err(InvalidMonomorphizationBasicInteger { span, name, ty });
+                            return;
+                        }
+                    }
+                }
+
+                sym::raw_eq => {
+                    use rustc_target::abi::Abi::*;
+                    let tp_ty = substs.type_at(0);
+                    let layout = self.layout_of(tp_ty).layout;
+                    let _use_integer_compare = match layout.abi() {
+                        Scalar(_) | ScalarPair(_, _) => true,
+                        Uninhabited | Vector { ..
} => false, + Aggregate { .. } => { + // For rusty ABIs, small aggregates are actually passed + // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), + // so we re-use that same threshold here. + layout.size() <= self.data_layout().pointer_size * 2 + } + }; + + let a = args[0].immediate(); + let b = args[1].immediate(); + if layout.size().bytes() == 0 { + self.const_bool(true) + } + /*else if use_integer_compare { + let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. + let ptr_ty = self.type_ptr_to(integer_ty); + let a_ptr = self.bitcast(a, ptr_ty); + let a_val = self.load(integer_ty, a_ptr, layout.align.abi); + let b_ptr = self.bitcast(b, ptr_ty); + let b_val = self.load(integer_ty, b_ptr, layout.align.abi); + self.icmp(IntPredicate::IntEQ, a_val, b_val) + }*/ + else { + let void_ptr_type = self.context.new_type::<*const ()>(); + let a_ptr = self.bitcast(a, void_ptr_type); + let b_ptr = self.bitcast(b, void_ptr_type); + let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type); + let builtin = self.context.get_builtin_function("memcmp"); + let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]); + self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)) + } + } + + sym::black_box => { + args[0].val.store(self, result); + + let block = self.llbb(); + let extended_asm = block.add_extended_asm(None, ""); + extended_asm.add_input_operand(None, "r", result.llval); + extended_asm.add_clobber("memory"); + extended_asm.set_volatile_flag(true); + + // We have copied the value to `result` already. + return; + } + + sym::ptr_mask => { + let usize_type = self.context.new_type::<usize>(); + let void_ptr_type = self.context.new_type::<*const ()>(); + + let ptr = args[0].immediate(); + let mask = args[1].immediate(); + + let addr = self.bitcast(ptr, usize_type); + let masked = self.and(addr, mask); + self.bitcast(masked, void_ptr_type) + }, + + _ if name_str.starts_with("simd_") => { + match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { + Ok(llval) => llval, + Err(()) => return, + } + } + + _ => bug!("unknown intrinsic '{}'", name), + }; + + if !fn_abi.ret.is_ignore() { + if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { + let ptr_llty = self.type_ptr_to(ty.gcc_type(self)); + let ptr = self.pointercast(result.llval, ptr_llty); + self.store(llval, ptr, result.align); + } + else { + OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) + .val + .store(self, result); + } + } + } + + fn abort(&mut self) { + let func = self.context.get_builtin_function("abort"); + let func: RValue<'gcc> = unsafe { std::mem::transmute(func) }; + self.call(self.type_void(), None, func, &[], None); + } + + fn assume(&mut self, value: Self::Value) { + // TODO(antoyo): switch to assume when it exists. + // Or use something like this: + // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) + self.expect(value, true); + } + + fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value { + // TODO(antoyo) + cond + } + + fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value { + // Unsupported. + self.context.new_rvalue_from_int(self.int_type, 0) + } + + fn type_checked_load( + &mut self, + _llvtable: Self::Value, + _vtable_byte_offset: u64, + _typeid: Self::Value, + ) -> Self::Value { + // Unsupported. 
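+        // As with `type_test` above, return a constant zero: type-checked loads
+        // are not supported by this backend.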
+        self.context.new_rvalue_from_int(self.int_type, 0)
+    }
+
+    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+        arg_abi.store_fn_arg(self, idx, dst)
+    }
+
+    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+        arg_abi.store(self, val, dst)
+    }
+
+    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        arg_abi.memory_ty(self)
+    }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
+    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+    /// Gets the GCC type for a place of the original Rust type of
+    /// this argument/return, i.e., the result of `type_of::type_of`.
+    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        self.layout.gcc_type(cx)
+    }
+
+    /// Stores a direct/indirect value described by this ArgAbi into a
+    /// place for the original Rust type of this argument/return.
+    /// Can be used both for storing formal arguments into Rust variables
+    /// and for storing the results of call/invoke instructions into their destinations.
+    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+        if self.is_ignore() {
+            return;
+        }
+        if self.is_sized_indirect() {
+            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+        }
+        else if self.is_unsized_indirect() {
+            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+        }
+        else if let PassMode::Cast(ref cast, _) = self.mode {
+            // FIXME(eddyb): Figure out when the simpler Store is safe: clang
+            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+            let can_store_through_cast_ptr = false;
+            if can_store_through_cast_ptr {
+                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
+                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+                bx.store(val, cast_dst, self.layout.align.abi);
+            }
+            else {
+                // The actual return type is a struct, but the ABI
+                // adaptation code has cast it into some scalar type. The
+                // code that follows is the only reliable way I have
+                // found to do a transform like i64 -> {i32,i32}.
+                // Basically we dump the data onto the stack then memcpy it.
+                //
+                // Other approaches I tried:
+                // - Casting the Rust return pointer to the foreign type and using Store
+                //   is (a) unsafe if the size of the foreign type > the size of the Rust type
+                //   and (b) runs afoul of strict aliasing rules, yielding invalid
+                //   assembly under -O (specifically, the store gets removed).
+                // - Truncating the foreign type to the correct integral type and then
+                //   bitcasting to the struct type yields invalid cast errors.
+
+                // So we allocate some scratch space instead...
+                let scratch_size = cast.size(bx);
+                let scratch_align = cast.align(bx);
+                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+                bx.lifetime_start(llscratch, scratch_size);
+
+                // ... where we first store the value...
+                bx.store(val, llscratch, scratch_align);
+
+                // ... and then memcpy it to the intended destination.
+ bx.memcpy( + dst.llval, + self.layout.align.abi, + llscratch, + scratch_align, + bx.const_usize(self.layout.size.bytes()), + MemFlags::empty(), + ); + + bx.lifetime_end(llscratch, scratch_size); + } + } + else { + OperandValue::Immediate(val).store(bx, dst); + } + } + + fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) { + let mut next = || { + let val = bx.current_func().get_param(*idx as i32); + *idx += 1; + val.to_rvalue() + }; + match self.mode { + PassMode::Ignore => {}, + PassMode::Pair(..) => { + OperandValue::Pair(next(), next()).store(bx, dst); + }, + PassMode::Indirect { extra_attrs: Some(_), .. } => { + OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); + }, + PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => { + let next_arg = next(); + self.store(bx, next_arg, dst); + }, + } + } +} + +fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> { + match ty.kind() { + ty::Int(t) => Some(( + match t { + rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width), + rustc_middle::ty::IntTy::I8 => 8, + rustc_middle::ty::IntTy::I16 => 16, + rustc_middle::ty::IntTy::I32 => 32, + rustc_middle::ty::IntTy::I64 => 64, + rustc_middle::ty::IntTy::I128 => 128, + }, + true, + )), + ty::Uint(t) => Some(( + match t { + rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width), + rustc_middle::ty::UintTy::U8 => 8, + rustc_middle::ty::UintTy::U16 => 16, + rustc_middle::ty::UintTy::U32 => 32, + rustc_middle::ty::UintTy::U64 => 64, + rustc_middle::ty::UintTy::U128 => 128, + }, + false, + )), + _ => None, + } +} + +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { + fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> { + let result_type = value.get_type(); + let typ = result_type.to_unsigned(self.cx); + + let value = + if result_type.is_signed(self.cx) { + self.gcc_int_cast(value, typ) + } + else { + value + }; + + let context = &self.cx.context; + let result = + match width { + 8 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 4)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 1)); + let step3 = self.or(left, right); + + step3 + }, + 16 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 1)); + let step1 = self.or(left, right); + + // Second step. 
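+                    // Swap adjacent 2-bit pairs, continuing the divide-and-swap scheme
+                    // of the 8-bit arm above (e.g. on 8 bits: 0b1101_0000 -> 0b0000_1101
+                    // -> 0b0000_0111 -> 0b0000_1011, the full reversal).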
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 4)); + let step3 = self.or(left, right); + + // Fourth step. + let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 8)); + let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 8)); + let step4 = self.or(left, right); + + step4 + }, + 32 => { + // TODO(antoyo): Refactor with other implementations. + // First step. + let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 1)); + let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 1)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 4)); + let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 4)); + let step3 = self.or(left, right); + + // Fourth step. + let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 8)); + let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 8)); + let step4 = self.or(left, right); + + // Fifth step. + let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 16)); + let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 16)); + let step5 = self.or(left, right); + + step5 + }, + 64 => { + // First step. + let left = self.shl(value, context.new_rvalue_from_long(typ, 32)); + let right = self.lshr(value, context.new_rvalue_from_long(typ, 32)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 15)); + let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead? + let right = self.lshr(right, context.new_rvalue_from_long(typ, 17)); + let step2 = self.or(left, right); + + // Third step. 
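+                    // Delta swap: t = (x ^ (x >> 10)) & 0x003F801F003F801F, then
+                    // x ^= t | (t << 10); `|` coincides with `^` here because the
+                    // mask and its shifted copy select disjoint bits.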
+ let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10)); + let left = self.xor(step2, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 10)); + let left = self.or(temp, left); + let step3 = self.xor(left, step2); + + // Fourth step. + let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4)); + let left = self.xor(step3, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 4)); + let left = self.or(temp, left); + let step4 = self.xor(left, step3); + + // Fifth step. + let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2)); + let left = self.xor(step4, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 2)); + let left = self.or(temp, left); + let step5 = self.xor(left, step4); + + step5 + }, + 128 => { + // TODO(antoyo): find a more efficient implementation? + let sixty_four = self.gcc_int(typ, 64); + let right_shift = self.gcc_lshr(value, sixty_four); + let high = self.gcc_int_cast(right_shift, self.u64_type); + let low = self.gcc_int_cast(value, self.u64_type); + + let reversed_high = self.bit_reverse(64, high); + let reversed_low = self.bit_reverse(64, low); + + let new_low = self.gcc_int_cast(reversed_high, typ); + let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four); + + self.gcc_or(new_low, new_high) + }, + _ => { + panic!("cannot bit reverse with width = {}", width); + }, + }; + + self.gcc_int_cast(result, result_type) + } + + fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): use width? + let arg_type = arg.get_type(); + let count_leading_zeroes = + // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here + // instead of using is_uint(). 
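+            // Dispatch on the operand type: use the matching __builtin_clz* variant
+            // when one exists, split 128-bit values into two 64-bit halves, and
+            // otherwise widen to unsigned long long and subtract the extra leading
+            // bits afterwards.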
+            if arg_type.is_uint(&self.cx) {
+                "__builtin_clz"
+            }
+            else if arg_type.is_ulong(&self.cx) {
+                "__builtin_clzl"
+            }
+            else if arg_type.is_ulonglong(&self.cx) {
+                "__builtin_clzll"
+            }
+            else if width == 128 {
+                // Algorithm from: https://stackoverflow.com/a/28433850/389119
+                let array_type = self.context.new_array_type(None, arg_type, 3);
+                let result = self.current_func()
+                    .new_local(None, array_type, "count_leading_zeroes_results");
+
+                let sixty_four = self.const_uint(arg_type, 64);
+                let shift = self.lshr(arg, sixty_four);
+                let high = self.gcc_int_cast(shift, self.u64_type);
+                let low = self.gcc_int_cast(arg, self.u64_type);
+
+                let zero = self.context.new_rvalue_zero(self.usize_type);
+                let one = self.context.new_rvalue_one(self.usize_type);
+                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+                let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+                let first_elem = self.context.new_array_access(None, result, zero);
+                let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
+                self.llbb()
+                    .add_assignment(None, first_elem, first_value);
+
+                let second_elem = self.context.new_array_access(None, result, one);
+                let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+                let second_value = self.add(cast, sixty_four);
+                self.llbb()
+                    .add_assignment(None, second_elem, second_value);
+
+                let third_elem = self.context.new_array_access(None, result, two);
+                let third_value = self.const_uint(arg_type, 128);
+                self.llbb()
+                    .add_assignment(None, third_elem, third_value);
+
+                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_low_and_not_high = not_low & not_high;
+                let index = not_high + not_low_and_not_high;
+                // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+                // gcc.
+                // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+                // compilation stage.
+                let index = self.context.new_cast(None, index, self.i32_type);
+
+                let res = self.context.new_array_access(None, result, index);
+
+                return self.gcc_int_cast(res.to_rvalue(), arg_type);
+            }
+            else {
+                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
+                let arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
+                let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
+                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
+                return self.context.new_cast(None, res, arg_type);
+            };
+        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
+        self.context.new_cast(None, res, arg_type)
+    }
+
+    fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+        let result_type = arg.get_type();
+        let arg =
+            if result_type.is_signed(self.cx) {
+                let new_type = result_type.to_unsigned(self.cx);
+                self.gcc_int_cast(arg, new_type)
+            }
+            else {
+                arg
+            };
+        let arg_type = arg.get_type();
+        let (count_trailing_zeroes, expected_type) =
+            // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+            // instead of using is_uint().
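+            // Same dispatch as count_leading_zeroes() above: prefer a matching
+            // __builtin_ctz* variant, split u128 into two 64-bit halves, and widen
+            // anything else to unsigned long long.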
+ if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+ // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
+ ("__builtin_ctz", self.cx.uint_type)
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ ("__builtin_ctzl", self.cx.ulong_type)
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ ("__builtin_ctzll", self.cx.ulonglong_type)
+ }
+ else if arg_type.is_u128(&self.cx) {
+ // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_trailing_zeroes_results");
+
+ let sixty_four = self.gcc_int(arg_type, 64);
+ let shift = self.gcc_lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.gcc_int(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_low + not_low_and_not_high;
+ // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+ // gcc.
+ // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+ // compilation stage.
+ let index = self.context.new_cast(None, index, self.i32_type);
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.gcc_int_cast(res.to_rvalue(), result_type);
+ }
+ else {
+ let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
+ let arg_size = arg_type.get_size();
+ let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+ let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
+ let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
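+ // NOTE: `masked == mask` holds exactly when the argument is zero; the
+ // multiplication below then keeps `diff` (the bits added by the cast to
+ // ulonglong), adjusting the result back down to the argument's bit width
+ // for a zero input.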
+ let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg); + let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask); + let diff = diff * self.context.new_cast(None, cond, self.int_type); + let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff; + return self.context.new_cast(None, res, result_type); + }; + let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes); + let arg = + if arg_type != expected_type { + self.context.new_cast(None, arg, expected_type) + } + else { + arg + }; + let res = self.context.new_call(None, count_trailing_zeroes, &[arg]); + self.context.new_cast(None, res, result_type) + } + + fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): use the optimized version with fewer operations. + let result_type = value.get_type(); + let value_type = result_type.to_unsigned(self.cx); + + let value = + if result_type.is_signed(self.cx) { + self.gcc_int_cast(value, value_type) + } + else { + value + }; + + if value_type.is_u128(&self.cx) { + // TODO(antoyo): implement in the normal algorithm below to have a more efficient + // implementation (that does not require a call to __popcountdi2). + let popcount = self.context.get_builtin_function("__builtin_popcountll"); + let sixty_four = self.gcc_int(value_type, 64); + let right_shift = self.gcc_lshr(value, sixty_four); + let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type); + let high = self.context.new_call(None, popcount, &[high]); + let low = self.gcc_int_cast(value, self.cx.ulonglong_type); + let low = self.context.new_call(None, popcount, &[low]); + let res = high + low; + return self.gcc_int_cast(res, result_type); + } + + // First step. + let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 1); + let right = shifted & mask; + let value = left + right; + + // Second step. + let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 2); + let right = shifted & mask; + let value = left + right; + + // Third step. + let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 4); + let right = shifted & mask; + let value = left + right; + + if value_type.is_u8(&self.cx) { + return self.context.new_cast(None, value, result_type); + } + + // Fourth step. + let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 8); + let right = shifted & mask; + let value = left + right; + + if value_type.is_u16(&self.cx) { + return self.context.new_cast(None, value, result_type); + } + + // Fifth step. + let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 16); + let right = shifted & mask; + let value = left + right; + + if value_type.is_u32(&self.cx) { + return self.context.new_cast(None, value, result_type); + } + + // Sixth step. 
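+ // NOTE: final fold of the SWAR popcount: add the counts accumulated in
+ // the two 32-bit halves to get the population count of the whole value.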
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 32); + let right = shifted & mask; + let value = left + right; + + self.context.new_cast(None, value, result_type) + } + + // Algorithm from: https://blog.regehr.org/archives/1063 + fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> { + let max = self.const_uint(shift.get_type(), width); + let shift = self.urem(shift, max); + let lhs = self.shl(value, shift); + let result_neg = self.neg(shift); + let result_and = + self.and( + result_neg, + self.const_uint(shift.get_type(), width - 1), + ); + let rhs = self.lshr(value, result_and); + self.or(lhs, rhs) + } + + // Algorithm from: https://blog.regehr.org/archives/1063 + fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> { + let max = self.const_uint(shift.get_type(), width); + let shift = self.urem(shift, max); + let lhs = self.lshr(value, shift); + let result_neg = self.neg(shift); + let result_and = + self.and( + result_neg, + self.const_uint(shift.get_type(), width - 1), + ); + let rhs = self.shl(value, result_and); + self.or(lhs, rhs) + } + + fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> { + let result_type = lhs.get_type(); + if signed { + // Based on algorithm from: https://stackoverflow.com/a/56531252/389119 + let func = self.current_func.borrow().expect("func"); + let res = func.new_local(None, result_type, "saturating_sum"); + let supports_native_type = self.is_native_int_type(result_type); + let overflow = + if supports_native_type { + let func_name = + match width { + 8 => "__builtin_add_overflow", + 16 => "__builtin_add_overflow", + 32 => "__builtin_sadd_overflow", + 64 => "__builtin_saddll_overflow", + 128 => "__builtin_add_overflow", + _ => unreachable!(), + }; + let overflow_func = self.context.get_builtin_function(func_name); + self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None) + } + else { + let func_name = + match width { + 128 => "__rust_i128_addo", + _ => unreachable!(), + }; + let param_a = self.context.new_parameter(None, result_type, "a"); + let param_b = self.context.new_parameter(None, result_type, "b"); + let result_field = self.context.new_field(None, result_type, "result"); + let overflow_field = self.context.new_field(None, self.bool_type, "overflow"); + let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]); + let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false); + let result = self.context.new_call(None, func, &[lhs, rhs]); + let overflow = result.access_field(None, overflow_field); + let int_result = result.access_field(None, result_field); + self.llbb().add_assignment(None, res, int_result); + overflow + }; + + let then_block = func.new_block("then"); + let after_block = func.new_block("after"); + + // Return `result_type`'s maximum or minimum value on overflow + // NOTE: convert the type to unsigned to have an unsigned shift. 
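+ // NOTE: `shifted` is the sign bit of lhs (0 if positive, 1 if negative),
+ // so `shifted + int_max` yields INT_MAX on positive overflow and wraps to
+ // INT_MIN's bit pattern on negative overflow.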
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+ // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ res.to_rvalue()
+ }
+ else {
+ // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
+ let res = self.gcc_add(lhs, rhs);
+ let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
+ let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
+ self.gcc_or(res, value)
+ }
+ }
+
+ // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
+ fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ let result_type = lhs.get_type();
+ if signed {
+ // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+ let func = self.current_func.borrow().expect("func");
+ let res = func.new_local(None, result_type, "saturating_diff");
+ let supports_native_type = self.is_native_int_type(result_type);
+ let overflow =
+ if supports_native_type {
+ let func_name =
+ match width {
+ 8 => "__builtin_sub_overflow",
+ 16 => "__builtin_sub_overflow",
+ 32 => "__builtin_ssub_overflow",
+ 64 => "__builtin_ssubll_overflow",
+ 128 => "__builtin_sub_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+ }
+ else {
+ let func_name =
+ match width {
+ 128 => "__rust_i128_subo",
+ _ => unreachable!(),
+ };
+ let param_a = self.context.new_parameter(None, result_type, "a");
+ let param_b = self.context.new_parameter(None, result_type, "b");
+ let result_field = self.context.new_field(None, result_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ self.llbb().add_assignment(None, res, int_result);
+ overflow
+ };
+
+ let then_block = func.new_block("then");
+ let after_block = func.new_block("after");
+
+ // Return `result_type`'s maximum or minimum value on overflow
+ // NOTE: convert the type to unsigned to have an unsigned shift.
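+ // NOTE: same saturation-value computation as in saturating_add above: the
+ // sign bit of lhs selects between INT_MAX and INT_MIN.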
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+ // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ res.to_rvalue()
+ }
+ else {
+ let res = self.gcc_sub(lhs, rhs);
+ let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs);
+ let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type));
+ self.gcc_and(res, value)
+ }
+ }
+}
+
+fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+ if bx.sess().panic_strategy() == PanicStrategy::Abort {
+ bx.call(bx.type_void(), None, try_func, &[data], None);
+ // Return 0 unconditionally from the intrinsic call;
+ // we can never unwind.
+ let ret_align = bx.tcx.data_layout.i32_align.abi;
+ bx.store(bx.const_i32(0), dest, ret_align);
+ }
+ else if wants_msvc_seh(bx.sess()) {
+ unimplemented!();
+ }
+ else {
+ #[cfg(feature="master")]
+ codegen_gnu_try(bx, try_func, data, _catch_func, dest);
+ #[cfg(not(feature="master"))]
+ unimplemented!();
+ }
+}
+
+// Definition of the standard `try` function for Rust using the GNU-like model
+// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
+// instructions).
+//
+// This codegen is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+#[cfg(feature="master")]
+fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>, data: RValue<'gcc>, catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+ let cx: &CodegenCx<'gcc, '_> = bx.cx;
+ let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| {
+ // Codegens the shims described above:
+ //
+ // bx:
+ // invoke %try_func(%data) normal %normal unwind %catch
+ //
+ // normal:
+ // ret 0
+ //
+ // catch:
+ // (%ptr, _) = landingpad
+ // call %catch_func(%data, %ptr)
+ // ret 1
+ let then = bx.append_sibling_block("then");
+ let catch = bx.append_sibling_block("catch");
+
+ let func = bx.current_func();
+ let try_func = func.get_param(0).to_rvalue();
+ let data = func.get_param(1).to_rvalue();
+ let catch_func = func.get_param(2).to_rvalue();
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+
+ let current_block = bx.block.clone();
+
+ bx.switch_to_block(then);
+ bx.ret(bx.const_i32(0));
+
+ // Type indicator for the exception being thrown.
+ //
+ // The value is a pointer to the exception object
+ // being thrown.
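+ // NOTE: the catch block fetches the exception pointer via GCC's
+ // __builtin_eh_pointer (playing the role of LLVM's landingpad value) and
+ // hands it to %catch_func.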
+ bx.switch_to_block(catch); + bx.set_personality_fn(bx.eh_personality()); + + let eh_pointer_builtin = bx.cx.context.get_target_builtin_function("__builtin_eh_pointer"); + let zero = bx.cx.context.new_rvalue_zero(bx.int_type); + let ptr = bx.cx.context.new_call(None, eh_pointer_builtin, &[zero]); + let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + bx.call(catch_ty, None, catch_func, &[data, ptr], None); + bx.ret(bx.const_i32(1)); + + // NOTE: the blocks must be filled before adding the try/catch, otherwise gcc will not + // generate a try/catch. + // FIXME(antoyo): add a check in the libgccjit API to prevent this. + bx.switch_to_block(current_block); + bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); + }); + + let func = unsafe { std::mem::transmute(func) }; + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). + let ret = bx.call(llty, None, func, &[try_func, data, catch_func], None); + let i32_align = bx.tcx().data_layout.i32_align.abi; + bx.store(ret, dest, i32_align); +} + + +// Helper function used to get a handle to the `__rust_try` function used to +// catch exceptions. +// +// This function is only generated once and is then cached. +#[cfg(feature="master")] +fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) { + if let Some(llfn) = cx.rust_try_fn.get() { + return llfn; + } + + // Define the type up front for the signature of the rust_try function. + let tcx = cx.tcx; + let i8p = tcx.mk_mut_ptr(tcx.types.i8); + // `unsafe fn(*mut i8) -> ()` + let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + iter::once(i8p), + tcx.mk_unit(), + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + ))); + // `unsafe fn(*mut i8, *mut i8) -> ()` + let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + [i8p, i8p].iter().cloned(), + tcx.mk_unit(), + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + ))); + // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32` + let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig( + [try_fn_ty, i8p, catch_fn_ty], + tcx.types.i32, + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + )); + let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen); + cx.rust_try_fn.set(Some(rust_try)); + rust_try +} + +// Helper function to give a Block to a closure to codegen a shim function. +// This is currently primarily used for the `try` intrinsic functions above. +#[cfg(feature="master")] +fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) { + let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty()); + let (typ, _, _, _) = fn_abi.gcc_type(cx); + // FIXME(eddyb) find a nicer way to do this. 
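+ // NOTE: the linkage stored in the context is picked up by declare_fn
+ // below, so the shim is emitted with internal linkage.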
+ cx.linkage.set(FunctionType::Internal); + let func = cx.declare_fn(name, fn_abi); + let func_val = unsafe { std::mem::transmute(func) }; + cx.set_frame_pointer_type(func_val); + cx.apply_target_cpu_attr(func_val); + let block = Builder::append_block(cx, func_val, "entry-block"); + let bx = Builder::build(cx, block); + codegen(bx); + (typ, func) +} diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs new file mode 100644 index 00000000000..b59c3a64f57 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -0,0 +1,1053 @@ +#[cfg(feature="master")] +use gccjit::{ComparisonOp, UnaryOp}; +use gccjit::ToRValue; +use gccjit::{BinaryOp, RValue, Type}; + +use rustc_codegen_ssa::base::compare_simd_types; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; +#[cfg(feature="master")] +use rustc_codegen_ssa::errors::ExpectedPointerMutability; +use rustc_codegen_ssa::errors::InvalidMonomorphization; +use rustc_codegen_ssa::mir::operand::OperandRef; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods}; +use rustc_hir as hir; +use rustc_middle::span_bug; +use rustc_middle::ty::layout::HasTyCtxt; +use rustc_middle::ty::{self, Ty}; +use rustc_span::{sym, Span, Symbol}; +use rustc_target::abi::Align; + +use crate::builder::Builder; +#[cfg(feature="master")] +use crate::context::CodegenCx; +#[cfg(feature="master")] +use crate::errors::{InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationInsertedType}; +use crate::errors::{ + InvalidMonomorphizationExpectedSimd, + InvalidMonomorphizationInvalidBitmask, + InvalidMonomorphizationInvalidFloatVector, InvalidMonomorphizationMaskType, + InvalidMonomorphizationMismatchedLengths, InvalidMonomorphizationNotFloat, + InvalidMonomorphizationReturnElement, InvalidMonomorphizationReturnIntegerType, + InvalidMonomorphizationReturnLength, InvalidMonomorphizationReturnLengthInputType, + InvalidMonomorphizationReturnType, InvalidMonomorphizationSimdShuffle, + InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedElement, + InvalidMonomorphizationUnsupportedOperation, +}; + +pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( + bx: &mut Builder<'a, 'gcc, 'tcx>, + name: Symbol, + callee_ty: Ty<'tcx>, + args: &[OperandRef<'tcx, RValue<'gcc>>], + ret_ty: Ty<'tcx>, + llret_ty: Type<'gcc>, + span: Span, +) -> Result<RValue<'gcc>, ()> { + // macros for error handling: + macro_rules! return_error { + ($err:expr) => {{ + bx.sess().emit_err($err); + return Err(()); + }}; + } + macro_rules! require { + ($cond:expr, $err:expr) => { + if !$cond { + return_error!($err); + } + }; + } + macro_rules! 
require_simd { + ($ty: expr, $position: expr) => { + require!( + $ty.is_simd(), + InvalidMonomorphizationExpectedSimd { + span, + name, + position: $position, + found_ty: $ty + } + ) + }; + } + + let tcx = bx.tcx(); + let sig = + tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx)); + let arg_tys = sig.inputs(); + + if name == sym::simd_select_bitmask { + require_simd!(arg_tys[1], "argument"); + let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + + let expected_int_bits = (len.max(8) - 1).next_power_of_two(); + let expected_bytes = len / 8 + ((len % 8 > 0) as u64); + + let mask_ty = arg_tys[0]; + let mut mask = match mask_ty.kind() { + ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), + ty::Array(elem, len) + if matches!(elem.kind(), ty::Uint(ty::UintTy::U8)) + && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) + == Some(expected_bytes) => + { + let place = PlaceRef::alloca(bx, args[0].layout); + args[0].val.store(bx, place); + let int_ty = bx.type_ix(expected_bytes * 8); + let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty)); + bx.load(int_ty, ptr, Align::ONE) + } + _ => return_error!(InvalidMonomorphizationInvalidBitmask { + span, + name, + ty: mask_ty, + expected_int_bits, + expected_bytes + }), + }; + + let arg1 = args[1].immediate(); + let arg1_type = arg1.get_type(); + let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type"); + let arg1_element_type = arg1_vector_type.get_element_type(); + + // NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of + // integer. + let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8); + let vector_mask_type = bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64); + + let mut elements = vec![]; + let one = bx.context.new_rvalue_one(mask.get_type()); + for _ in 0..len { + let element = bx.context.new_cast(None, mask & one, mask_element_type); + elements.push(element); + mask = mask >> one; + } + let vector_mask = bx.context.new_rvalue_from_vector(None, vector_mask_type, &elements); + + return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate())); + } + + // every intrinsic below takes a SIMD vector as its first argument + require_simd!(arg_tys[0], "input"); + let in_ty = arg_tys[0]; + + let comparison = match name { + sym::simd_eq => Some(hir::BinOpKind::Eq), + sym::simd_ne => Some(hir::BinOpKind::Ne), + sym::simd_lt => Some(hir::BinOpKind::Lt), + sym::simd_le => Some(hir::BinOpKind::Le), + sym::simd_gt => Some(hir::BinOpKind::Gt), + sym::simd_ge => Some(hir::BinOpKind::Ge), + _ => None, + }; + + let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx()); + if let Some(cmp_op) = comparison { + require_simd!(ret_ty, "return"); + + let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + InvalidMonomorphizationReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); + require!( + bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, + InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty } + ); + + return Ok(compare_simd_types( + bx, + args[0].immediate(), + args[1].immediate(), + in_elem, + llret_ty, + cmp_op, + )); + } + + if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") { + let n: u64 = if stripped.is_empty() { + // Make sure this is 
actually an array, since typeck only checks the length-suffixed + // version of this intrinsic. + match args[2].layout.ty.kind() { + ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { + len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( + || span_bug!(span, "could not evaluate shuffle index array length"), + ) + } + _ => return_error!(InvalidMonomorphizationSimdShuffle { + span, + name, + ty: args[2].layout.ty + }), + } + } else { + stripped.parse().unwrap_or_else(|_| { + span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") + }) + }; + + require_simd!(ret_ty, "return"); + + let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + out_len == n, + InvalidMonomorphizationReturnLength { span, name, in_len: n, ret_ty, out_len } + ); + require!( + in_elem == out_ty, + InvalidMonomorphizationReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } + ); + + let vector = args[2].immediate(); + + return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), vector)); + } + + #[cfg(feature = "master")] + if name == sym::simd_insert { + require!( + in_elem == arg_tys[2], + InvalidMonomorphizationInsertedType { span, name, in_elem, in_ty, out_ty: arg_tys[2] } + ); + let vector = args[0].immediate(); + let index = args[1].immediate(); + let value = args[2].immediate(); + let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector"); + bx.llbb().add_assignment(None, variable, vector); + let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index); + // TODO(antoyo): if simd_insert is constant, use BIT_REF. + bx.llbb().add_assignment(None, lvalue, value); + return Ok(variable.to_rvalue()); + } + + #[cfg(feature = "master")] + if name == sym::simd_extract { + require!( + ret_ty == in_elem, + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + ); + let vector = args[0].immediate(); + return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue()); + } + + if name == sym::simd_select { + let m_elem_ty = in_elem; + let m_len = in_len; + require_simd!(arg_tys[1], "argument"); + let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + require!( + m_len == v_len, + InvalidMonomorphizationMismatchedLengths { span, name, m_len, v_len } + ); + match m_elem_ty.kind() { + ty::Int(_) => {} + _ => return_error!(InvalidMonomorphizationMaskType { span, name, ty: m_elem_ty }), + } + return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate())); + } + + #[cfg(feature="master")] + if name == sym::simd_cast || name == sym::simd_as { + require_simd!(ret_ty, "return"); + let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + InvalidMonomorphizationReturnLengthInputType { + span, + name, + in_len, + in_ty, + ret_ty, + out_len + } + ); + // casting cares about nominal type, not just structural type + if in_elem == out_elem { + return Ok(args[0].immediate()); + } + + enum Style { + Float, + Int, + Unsupported, + } + + let in_style = + match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, + }; + + let out_style = + match out_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, + }; + + match (in_style, out_style) { + (Style::Unsupported, Style::Unsupported) => { + require!( + false, + InvalidMonomorphization::UnsupportedCast { + span, + name, + 
in_ty, + in_elem, + ret_ty, + out_elem + } + ); + }, + _ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)), + } + } + + macro_rules! arith_binary { + ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { + $(if name == sym::$name { + match in_elem.kind() { + $($(ty::$p(_))|* => { + return Ok(bx.$call(args[0].immediate(), args[1].immediate())) + })* + _ => {}, + } + return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem }) + })* + } + } + + if name == sym::simd_bitmask { + // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a + // vector mask and returns the most significant bit (MSB) of each lane in the form + // of either: + // * an unsigned integer + // * an array of `u8` + // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits. + // + // The bit order of the result depends on the byte endianness, LSB-first for little + // endian and MSB-first for big endian. + + let vector = args[0].immediate(); + let vector_type = vector.get_type().dyncast_vector().expect("vector type"); + let elem_type = vector_type.get_element_type(); + + let expected_int_bits = in_len.max(8); + let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64); + + // FIXME(antoyo): that's not going to work for masks bigger than 128 bits. + let result_type = bx.type_ix(expected_int_bits); + let mut result = bx.context.new_rvalue_zero(result_type); + + let elem_size = elem_type.get_size() * 8; + let sign_shift = bx.context.new_rvalue_from_int(elem_type, elem_size as i32 - 1); + let one = bx.context.new_rvalue_one(elem_type); + + let mut shift = 0; + for i in 0..in_len { + let elem = bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32)); + let shifted = elem >> sign_shift; + let masked = shifted & one; + result = result | (bx.context.new_cast(None, masked, result_type) << bx.context.new_rvalue_from_int(result_type, shift)); + shift += 1; + } + + match ret_ty.kind() { + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => { + // Zero-extend iN to the bitmask type: + return Ok(result); + } + ty::Array(elem, len) + if matches!(elem.kind(), ty::Uint(ty::UintTy::U8)) + && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) + == Some(expected_bytes) => + { + // Zero-extend iN to the array length: + let ze = bx.zext(result, bx.type_ix(expected_bytes * 8)); + + // Convert the integer to a byte array + let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); + bx.store(ze, ptr, Align::ONE); + let array_ty = bx.type_array(bx.type_i8(), expected_bytes); + let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); + return Ok(bx.load(array_ty, ptr, Align::ONE)); + } + _ => return_error!(InvalidMonomorphization::CannotReturn { + span, + name, + ret_ty, + expected_int_bits, + expected_bytes + }), + } + } + + fn simd_simple_float_intrinsic<'gcc, 'tcx>( + name: Symbol, + in_elem: Ty<'_>, + in_ty: Ty<'_>, + in_len: u64, + bx: &mut Builder<'_, 'gcc, 'tcx>, + span: Span, + args: &[OperandRef<'tcx, RValue<'gcc>>], + ) -> Result<RValue<'gcc>, ()> { + macro_rules! 
return_error { + ($err:expr) => {{ + bx.sess().emit_err($err); + return Err(()); + }}; + } + let (elem_ty_str, elem_ty) = + if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f", elem_ty), + 64 => ("", elem_ty), + _ => { + return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); + } + } + } + else { + return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); + }; + + let vec_ty = bx.cx.type_vector(elem_ty, in_len); + + let intr_name = + match name { + sym::simd_ceil => "ceil", + sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fcos => "cos", + sym::simd_fexp2 => "exp2", + sym::simd_fexp => "exp", + sym::simd_flog10 => "log10", + sym::simd_flog2 => "log2", + sym::simd_flog => "log", + sym::simd_floor => "floor", + sym::simd_fma => "fma", + sym::simd_fpowi => "__builtin_powi", + sym::simd_fpow => "pow", + sym::simd_fsin => "sin", + sym::simd_fsqrt => "sqrt", + sym::simd_round => "round", + sym::simd_trunc => "trunc", + _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }) + }; + let builtin_name = format!("{}{}", intr_name, elem_ty_str); + let funcs = bx.cx.functions.borrow(); + let function = funcs.get(&builtin_name).unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name)); + + // TODO(antoyo): add platform-specific behavior here for architectures that have these + // intrinsics as instructions (for instance, gpus) + let mut vector_elements = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64); + // we have to treat fpowi specially, since fpowi's second argument is always an i32 + let arguments = if name == sym::simd_fpowi { + vec![ + bx.extract_element(args[0].immediate(), index).to_rvalue(), + args[1].immediate(), + ] + } else { + args.iter() + .map(|arg| bx.extract_element(arg.immediate(), index).to_rvalue()) + .collect() + }; + vector_elements.push(bx.context.new_call(None, *function, &arguments)); + } + let c = bx.context.new_rvalue_from_vector(None, vec_ty, &vector_elements); + Ok(c) + } + + if std::matches!( + name, + sym::simd_ceil + | sym::simd_fabs + | sym::simd_fcos + | sym::simd_fexp2 + | sym::simd_fexp + | sym::simd_flog10 + | sym::simd_flog2 + | sym::simd_flog + | sym::simd_floor + | sym::simd_fma + | sym::simd_fpow + | sym::simd_fpowi + | sym::simd_fsin + | sym::simd_fsqrt + | sym::simd_round + | sym::simd_trunc + ) { + return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args); + } + + #[cfg(feature="master")] + fn vector_ty<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, elem_ty: Ty<'tcx>, vec_len: u64) -> Type<'gcc> { + // FIXME: use cx.layout_of(ty).llvm_type() ? 
+ let elem_ty = match *elem_ty.kind() { + ty::Int(v) => cx.type_int_from_ty(v), + ty::Uint(v) => cx.type_uint_from_ty(v), + ty::Float(v) => cx.type_float_from_ty(v), + _ => unreachable!(), + }; + cx.type_vector(elem_ty, vec_len) + } + + #[cfg(feature="master")] + fn gather<'a, 'gcc, 'tcx>(default: RValue<'gcc>, pointers: RValue<'gcc>, mask: RValue<'gcc>, pointer_count: usize, bx: &mut Builder<'a, 'gcc, 'tcx>, in_len: u64, underlying_ty: Ty<'tcx>, invert: bool) -> RValue<'gcc> { + let vector_type = + if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } + else { + vector_ty(bx, underlying_ty, in_len) + }; + let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); + + let mut values = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64); + let int = bx.context.new_vector_access(None, pointers, index).to_rvalue(); + + let ptr_type = elem_type.make_pointer(); + let ptr = bx.context.new_bitcast(None, int, ptr_type); + let value = ptr.dereference(None).to_rvalue(); + values.push(value); + } + + let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values); + + let mut mask_types = vec![]; + let mut mask_values = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64); + mask_types.push(bx.context.new_field(None, bx.i32_type, "m")); + let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue(); + let masked = bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value; + let value = index + masked; + mask_values.push(value); + } + let mask_type = bx.context.new_struct_type(None, "mask_type", &mask_types); + let mask = bx.context.new_struct_constructor(None, mask_type.as_type(), None, &mask_values); + + if invert { + bx.shuffle_vector(vector, default, mask) + } + else { + bx.shuffle_vector(default, vector, mask) + } + } + + #[cfg(feature="master")] + if name == sym::simd_gather { + // simd_gather(values: <N x T>, pointers: <N x *_ T>, + // mask: <N x i{M}>) -> <N x T> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + require_simd!(ret_ty, "return"); + + // Of the same length: + let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len + } + ); + require!( + in_len == out_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: out_len2 + } + ); + + // The return type must match the first argument type + require!( + ret_ty == in_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty } + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + 
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)), + _ => { + require!( + false, + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Not, + } + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count - 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + } + ); + } + } + + return Ok(gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, false)); + } + + #[cfg(feature="master")] + if name == sym::simd_scatter { + // simd_scatter(values: <N x T>, pointers: <N x *mut T>, + // mask: <N x i{M}>) -> () + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + + // Of the same length: + let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == element_len1, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len: element_len1 + } + ); + require!( + in_len == element_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: element_len2 + } + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => { + (ptr_count(element_ty1), non_ptr(element_ty1)) + } + _ => { + require!( + false, + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Mut, + } + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count - 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: 
element_ty2, + third_arg: arg_tys[2] + } + ); + } + } + + let result = gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, true); + + let pointers = args[1].immediate(); + + let vector_type = + if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } + else { + vector_ty(bx, underlying_ty, in_len) + }; + let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); + + for i in 0..in_len { + let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32); + let value = bx.context.new_vector_access(None, result, index); + + let int = bx.context.new_vector_access(None, pointers, index).to_rvalue(); + let ptr_type = elem_type.make_pointer(); + let ptr = bx.context.new_bitcast(None, int, ptr_type); + bx.llbb().add_assignment(None, ptr.dereference(None), value); + } + + return Ok(bx.context.new_rvalue_zero(bx.i32_type)); + } + + arith_binary! { + simd_add: Uint, Int => add, Float => fadd; + simd_sub: Uint, Int => sub, Float => fsub; + simd_mul: Uint, Int => mul, Float => fmul; + simd_div: Uint => udiv, Int => sdiv, Float => fdiv; + simd_rem: Uint => urem, Int => srem, Float => frem; + simd_shl: Uint, Int => shl; + simd_shr: Uint => lshr, Int => ashr; + simd_and: Uint, Int => and; + simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors. + simd_xor: Uint, Int => xor; + simd_fmin: Float => vector_fmin; + simd_fmax: Float => vector_fmax; + } + + macro_rules! arith_unary { + ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { + $(if name == sym::$name { + match in_elem.kind() { + $($(ty::$p(_))|* => { + return Ok(bx.$call(args[0].immediate())) + })* + _ => {}, + } + return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem }) + })* + } + } + + arith_unary! { + simd_neg: Int => neg, Float => fneg; + } + + #[cfg(feature = "master")] + if name == sym::simd_saturating_add || name == sym::simd_saturating_sub { + let lhs = args[0].immediate(); + let rhs = args[1].immediate(); + let is_add = name == sym::simd_saturating_add; + let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; + let (signed, elem_width, elem_ty) = + match *in_elem.kind() { + ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)), + ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i)), + _ => { + return_error!(InvalidMonomorphizationExpectedSignedUnsigned { + span, + name, + elem_ty: arg_tys[0].simd_size_and_type(bx.tcx()).1, + vec_ty: arg_tys[0], + }); + } + }; + + let result = + match (signed, is_add) { + (false, true) => { + let res = lhs + rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs); + res | cmp + }, + (true, true) => { + // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition + // TODO(antoyo): improve using conditional operators if possible. + let arg_type = lhs.get_type(); + // TODO(antoyo): convert lhs and rhs to unsigned. 
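+ // NOTE: signed overflow happened exactly in the lanes where lhs and rhs
+ // share a sign that differs from sum's: ~(lhs ^ rhs) & (lhs ^ sum) has the
+ // sign bit set in those lanes, and the arithmetic shift by width - 1
+ // broadcasts it into an all-ones lane mask used to select the saturated
+ // value below.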
+ let sum = lhs + rhs;
+ let vector_type = arg_type.dyncast_vector().expect("vector type");
+ let unit = vector_type.get_num_units();
+ let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
+ let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
+
+ let xor1 = lhs ^ rhs;
+ let xor2 = lhs ^ sum;
+ let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
+ let mask = and >> width;
+
+ let one = bx.context.new_rvalue_one(elem_ty);
+ let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
+ let shift1 = ones << width;
+ let shift2 = sum >> width;
+ let mask_min = shift1 ^ shift2;
+
+ let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
+ let and2 = mask & mask_min;
+
+ and1 + and2
+ },
+ (false, false) => {
+ let res = lhs - rhs;
+ let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
+ res & cmp
+ },
+ (true, false) => {
+ let arg_type = lhs.get_type();
+ // TODO(antoyo): this uses the same algorithm from saturating add, but adds the
+ // negative of the right operand. Find a proper subtraction algorithm.
+ let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
+
+ // TODO(antoyo): convert lhs and rhs to unsigned.
+ let sum = lhs + rhs;
+ let vector_type = arg_type.dyncast_vector().expect("vector type");
+ let unit = vector_type.get_num_units();
+ let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
+ let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
+
+ let xor1 = lhs ^ rhs;
+ let xor2 = lhs ^ sum;
+ let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
+ let mask = and >> width;
+
+ let one = bx.context.new_rvalue_one(elem_ty);
+ let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
+ let shift1 = ones << width;
+ let shift2 = sum >> width;
+ let mask_min = shift1 ^ shift2;
+
+ let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
+ let and2 = mask & mask_min;
+
+ and1 + and2
+ }
+ };
+
+ return Ok(result);
+ }
+
+ macro_rules! arith_red {
+ ($name:ident : $vec_op:expr, $float_reduce:ident, $ordered:expr, $op:ident,
+ $identity:expr) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
+ if $ordered {
+ // if overflow occurs, the result is the
+ // mathematical result modulo 2^n:
+ Ok(bx.$op(args[1].immediate(), r))
+ } else {
+ Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+ }
+ }
+ ty::Float(_) => {
+ if $ordered {
+ // ordered arithmetic reductions take an accumulator
+ let acc = args[1].immediate();
+ Ok(bx.$float_reduce(acc, args[0].immediate()))
+ } else {
+ Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+ }
+ }
+ _ => return_error!(InvalidMonomorphizationUnsupportedElement {
+ span,
+ name,
+ in_ty,
+ elem_ty: in_elem,
+ ret_ty
+ }),
+ };
+ }
+ };
+ }
+
+ arith_red!(
+ simd_reduce_add_unordered: BinaryOp::Plus,
+ vector_reduce_fadd_fast,
+ false,
+ add,
+ 0.0 // TODO: Use this argument.
+ ); + arith_red!( + simd_reduce_mul_unordered: BinaryOp::Mult, + vector_reduce_fmul_fast, + false, + mul, + 1.0 + ); + arith_red!( + simd_reduce_add_ordered: BinaryOp::Plus, + vector_reduce_fadd, + true, + add, + 0.0 + ); + arith_red!( + simd_reduce_mul_ordered: BinaryOp::Mult, + vector_reduce_fmul, + true, + mul, + 1.0 + ); + + + macro_rules! minmax_red { + ($name:ident: $int_red:ident, $float_red:ident) => { + if name == sym::$name { + require!( + ret_ty == in_elem, + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + ); + return match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())), + ty::Float(_) => Ok(bx.$float_red(args[0].immediate())), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), + }; + } + }; + } + + minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax); + // TODO(sadlerap): revisit these intrinsics to generate more optimal reductions + minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax); + + macro_rules! bitwise_red { + ($name:ident : $op:expr, $boolean:expr) => { + if name == sym::$name { + let input = if !$boolean { + require!( + ret_ty == in_elem, + InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } + ); + args[0].immediate() + } else { + match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => {} + _ => return_error!(InvalidMonomorphizationUnsupportedElement { + span, + name, + in_ty, + elem_ty: in_elem, + ret_ty + }), + } + + args[0].immediate() + }; + return match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => { + let r = bx.vector_reduce_op(input, $op); + Ok(if !$boolean { r } else { bx.icmp(IntPredicate::IntNE, r, bx.context.new_rvalue_zero(r.get_type())) }) + } + _ => return_error!(InvalidMonomorphizationUnsupportedElement { + span, + name, + in_ty, + elem_ty: in_elem, + ret_ty + }), + }; + } + }; + } + + bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false); + bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false); + bitwise_red!(simd_reduce_xor: BinaryOp::BitwiseXor, false); + bitwise_red!(simd_reduce_all: BinaryOp::BitwiseAnd, true); + bitwise_red!(simd_reduce_any: BinaryOp::BitwiseOr, true); + + unimplemented!("simd {}", name); +} diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs new file mode 100644 index 00000000000..1b7feb5f8a1 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -0,0 +1,340 @@ +/* + * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?) + * TODO(antoyo): support #[inline] attributes. + * TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html). + * + * TODO(antoyo): remove the patches. 
+ */ + +#![feature( + rustc_private, + decl_macro, + associated_type_bounds, + never_type, + trusted_len, + hash_raw_entry +)] +#![allow(broken_intra_doc_links)] +#![recursion_limit="256"] +#![warn(rust_2018_idioms)] +#![warn(unused_lifetimes)] +#![deny(rustc::untranslatable_diagnostic)] +#![deny(rustc::diagnostic_outside_of_impl)] + +extern crate rustc_apfloat; +extern crate rustc_ast; +extern crate rustc_attr; +extern crate rustc_codegen_ssa; +extern crate rustc_data_structures; +extern crate rustc_errors; +extern crate rustc_hir; +extern crate rustc_macros; +extern crate rustc_metadata; +extern crate rustc_middle; +extern crate rustc_session; +extern crate rustc_span; +extern crate rustc_target; +extern crate tempfile; + +// This prevents duplicating functions and statics that are already part of the host rustc process. +#[allow(unused_extern_crates)] +extern crate rustc_driver; + +mod abi; +mod allocator; +mod archive; +mod asm; +mod attributes; +mod back; +mod base; +mod builder; +mod callee; +mod common; +mod consts; +mod context; +mod coverageinfo; +mod debuginfo; +mod declare; +mod errors; +mod int; +mod intrinsic; +mod mono_item; +mod type_; +mod type_of; + +use std::any::Any; +use std::sync::{Arc, Mutex}; + +use crate::errors::LTONotSupported; +use gccjit::{Context, OptimizationLevel, CType}; +use rustc_ast::expand::allocator::AllocatorKind; +use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; +use rustc_codegen_ssa::base::codegen_crate; +use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn}; +use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; +use rustc_codegen_ssa::target_features::supported_target_features; +use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; +use rustc_data_structures::fx::FxHashMap; +use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage}; +use rustc_macros::fluent_messages; +use rustc_metadata::EncodedMetadata; +use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; +use rustc_middle::ty::TyCtxt; +use rustc_middle::ty::query::Providers; +use rustc_session::config::{Lto, OptLevel, OutputFilenames}; +use rustc_session::Session; +use rustc_span::Symbol; +use rustc_span::fatal_error::FatalError; +use tempfile::TempDir; + +fluent_messages! { "../locales/en-US.ftl" } + +pub struct PrintOnPanic<F: Fn() -> String>(pub F); + +impl<F: Fn() -> String> Drop for PrintOnPanic<F> { + fn drop(&mut self) { + if ::std::thread::panicking() { + println!("{}", (self.0)()); + } + } +} + +#[derive(Clone)] +pub struct GccCodegenBackend { + supports_128bit_integers: Arc<Mutex<bool>>, +} + +impl CodegenBackend for GccCodegenBackend { + fn locale_resource(&self) -> &'static str { + crate::DEFAULT_LOCALE_RESOURCE + } + + fn init(&self, sess: &Session) { + if sess.lto() != Lto::No { + sess.emit_warning(LTONotSupported {}); + } + + let temp_dir = TempDir::new().expect("cannot create temporary directory"); + let temp_file = temp_dir.into_path().join("result.asm"); + let check_context = Context::default(); + check_context.set_print_errors_to_stderr(false); + let _int128_ty = check_context.new_c_type(CType::UInt128t); + // NOTE: we cannot just call compile() as this would require other files than libgccjit.so. 
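+ // NOTE: this probes for native 128-bit integer support: requesting the
+ // UInt128t C type and compiling a dummy module will make libgccjit report
+ // an error on targets (or gcc versions) where the type is unavailable.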
+ check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str")); + *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None); + } + + fn provide(&self, providers: &mut Providers) { + // FIXME(antoyo) compute list of enabled features from cli flags + providers.global_backend_features = |_tcx, ()| vec![]; + } + + fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> { + let target_cpu = target_cpu(tcx.sess); + let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module); + + Box::new(res) + } + + fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> { + let (codegen_results, work_products) = ongoing_codegen + .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>() + .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>") + .join(sess); + + Ok((codegen_results, work_products)) + } + + fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> { + use rustc_codegen_ssa::back::link::link_binary; + + link_binary( + sess, + &crate::archive::ArArchiveBuilderBuilder, + &codegen_results, + outputs, + ) + } + + fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> { + target_features(sess, allow_unstable) + } +} + +impl ExtraBackendMethods for GccCodegenBackend { + fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module { + let mut mods = GccContext { + context: Context::default(), + }; + unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); } + mods + } + + fn compile_codegen_unit(&self, tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) { + base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock")) + } + + fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> { + // TODO(antoyo): set opt level. + Arc::new(|_| { + Ok(()) + }) + } +} + +pub struct ModuleBuffer; + +impl ModuleBufferMethods for ModuleBuffer { + fn data(&self) -> &[u8] { + unimplemented!(); + } +} + +pub struct ThinBuffer; + +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { + unimplemented!(); + } +} + +pub struct GccContext { + context: Context<'static>, +} + +unsafe impl Send for GccContext {} +// FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here. +unsafe impl Sync for GccContext {} + +impl WriteBackendMethods for GccCodegenBackend { + type Module = GccContext; + type TargetMachine = (); + type TargetMachineError = (); + type ModuleBuffer = ModuleBuffer; + type ThinData = (); + type ThinBuffer = ThinBuffer; + + fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> { + // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. + // NOTE: implemented elsewhere. + // TODO(antoyo): what is implemented elsewhere ^ ? 
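+ // NOTE: for now, fat LTO simply unwraps the single in-memory module;
+ // serialized modules are not supported.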
+ let module = + match modules.remove(0) { + FatLTOInput::InMemory(module) => module, + FatLTOInput::Serialized { .. } => { + unimplemented!(); + } + }; + Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: vec![] }) + } + + fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> { + unimplemented!(); + } + + fn print_pass_timings(&self) { + unimplemented!(); + } + + unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> { + module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); + Ok(()) + } + + fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> { + // TODO(antoyo) + Ok(()) + } + + unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> { + unimplemented!(); + } + + unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> { + back::write::codegen(cgcx, diag_handler, module, config) + } + + fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) { + unimplemented!(); + } + + fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) { + unimplemented!(); + } + + fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> { + back::write::link(cgcx, diag_handler, modules) + } +} + +/// This is the entrypoint for a hot plugged rustc_codegen_gccjit +#[no_mangle] +pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> { + Box::new(GccCodegenBackend { + supports_128bit_integers: Arc::new(Mutex::new(false)), + }) +} + +fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel { + match optlevel { + None => OptimizationLevel::None, + Some(level) => { + match level { + OptLevel::No => OptimizationLevel::None, + OptLevel::Less => OptimizationLevel::Limited, + OptLevel::Default => OptimizationLevel::Standard, + OptLevel::Aggressive => OptimizationLevel::Aggressive, + OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited, + } + }, + } +} + +fn handle_native(name: &str) -> &str { + if name != "native" { + return name; + } + + unimplemented!(); +} + +pub fn target_cpu(sess: &Session) -> &str { + match sess.opts.cg.target_cpu { + Some(ref name) => handle_native(name), + None => handle_native(sess.target.cpu.as_ref()), + } +} + +pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { + supported_target_features(sess) + .iter() + .filter_map( + |&(feature, gate)| { + if sess.is_nightly_build() || allow_unstable || gate.is_none() { Some(feature) } else { None } + }, + ) + .filter(|_feature| { + // TODO(antoyo): implement a way to get enabled feature in libgccjit. + // Probably using the equivalent of __builtin_cpu_supports. + // TODO(antoyo): maybe use whatever outputs the following command: + // gcc -march=native -Q --help=target + #[cfg(feature="master")] + { + // NOTE: the CPU in the CI doesn't support sse4a, so disable it to make the stdarch tests pass in the CI. 
+ (_feature.contains("sse") || _feature.contains("avx")) && !_feature.contains("avx512") && !_feature.contains("sse4a") + } + #[cfg(not(feature="master"))] + { + false + } + /* + adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512ifma, + avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq, + bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, + sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves + */ + //false + }) + .map(|feature| Symbol::intern(feature)) + .collect() +} diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs new file mode 100644 index 00000000000..c1f6340866c --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/mono_item.rs @@ -0,0 +1,66 @@ +#[cfg(feature="master")] +use gccjit::{VarAttribute, FnAttribute}; +use rustc_codegen_ssa::traits::PreDefineMethods; +use rustc_hir::def_id::{DefId, LOCAL_CRATE}; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; +use rustc_middle::mir::mono::{Linkage, Visibility}; +use rustc_middle::ty::{self, Instance, TypeVisitableExt}; +use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; + +use crate::attributes; +use crate::base; +use crate::context::CodegenCx; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + fn predefine_static(&self, def_id: DefId, _linkage: Linkage, visibility: Visibility, symbol_name: &str) { + let attrs = self.tcx.codegen_fn_attrs(def_id); + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); + let gcc_type = self.layout_of(ty).gcc_type(self); + + let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section); + #[cfg(feature="master")] + global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility))); + + // TODO(antoyo): set linkage. + self.instances.borrow_mut().insert(instance, global); + } + + #[cfg_attr(not(feature="master"), allow(unused_variables))] + fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) { + assert!(!instance.substs.needs_infer()); + + let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); + self.linkage.set(base::linkage_to_gcc(linkage)); + let decl = self.declare_fn(symbol_name, &fn_abi); + //let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + + attributes::from_fn_attrs(self, decl, instance); + + // If we're compiling the compiler-builtins crate, e.g., the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal + && linkage != Linkage::Private + && self.tcx.is_compiler_builtins(LOCAL_CRATE) + { + #[cfg(feature="master")] + decl.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + else { + #[cfg(feature="master")] + decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility))); + } + + // TODO(antoyo): call set_link_section() to allow initializing argc/argv. + // TODO(antoyo): set unique comdat. 
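Back in `target_features` (lib.rs above), the `master`-feature branch boils down to a small predicate over feature names. A standalone sketch of that predicate (the `keep` helper is hypothetical):

    // Keep the SSE/AVX families, but drop AVX-512 and sse4a (the CI's CPU lacks sse4a).
    fn keep(feature: &str) -> bool {
        (feature.contains("sse") || feature.contains("avx"))
            && !feature.contains("avx512")
            && !feature.contains("sse4a")
    }

    fn main() {
        assert!(keep("sse2"));
        assert!(keep("avx2"));
        assert!(!keep("avx512f"));
        assert!(!keep("sse4a"));
    }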
+        // TODO(antoyo): use inline attribute from there in linkage.set() above.
+
+        self.functions.borrow_mut().insert(symbol_name.to_string(), decl);
+        self.function_instances.borrow_mut().insert(instance, unsafe { std::mem::transmute(decl) });
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
new file mode 100644
index 00000000000..daa661f35c4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -0,0 +1,295 @@
+use gccjit::{RValue, Struct, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_middle::{bug, ty};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
+        // GCC only supports 1-, 2-, 4-, 8- and 16-byte integers.
+        // FIXME(antoyo): rounding to the next power of two is misleading, as rustc_codegen_ssa
+        // sometimes uses 96-bit integers and the following code will then return an integer of a
+        // different size than requested.
+        let bytes = (num_bits / 8).next_power_of_two() as i32;
+        match bytes {
+            1 => self.i8_type,
+            2 => self.i16_type,
+            4 => self.i32_type,
+            8 => self.i64_type,
+            16 => self.i128_type,
+            _ => panic!("unexpected num_bits: {}", num_bits),
+        }
+    }
+
+    pub fn type_void(&self) -> Type<'gcc> {
+        self.context.new_type::<()>()
+    }
+
+    pub fn type_size_t(&self) -> Type<'gcc> {
+        self.context.new_type::<usize>()
+    }
+
+    pub fn type_u8(&self) -> Type<'gcc> {
+        self.u8_type
+    }
+
+    pub fn type_u16(&self) -> Type<'gcc> {
+        self.u16_type
+    }
+
+    pub fn type_u32(&self) -> Type<'gcc> {
+        self.u32_type
+    }
+
+    pub fn type_u64(&self) -> Type<'gcc> {
+        self.u64_type
+    }
+
+    pub fn type_u128(&self) -> Type<'gcc> {
+        self.u128_type
+    }
+
+    pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
+        // FIXME(eddyb) We could find a better approximation if ity.align < align.
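The FIXME on `type_ix` earlier in this file is easy to demonstrate in isolation: power-of-two rounding silently widens awkward bit-widths. A minimal standalone check (plain Rust, independent of gccjit):

    fn type_ix_bytes(num_bits: u64) -> u64 {
        // Same rounding as type_ix: bits -> bytes -> next power of two.
        (num_bits / 8).next_power_of_two()
    }

    fn main() {
        assert_eq!(type_ix_bytes(32), 4);
        // A 96-bit request comes back 16 bytes wide, i.e. an i128, not 12 bytes.
        assert_eq!(type_ix_bytes(96), 16);
    }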
+ let ity = Integer::approximate_align(self, align); + self.type_from_integer(ity) + } + + pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> { + self.context.new_vector_type(ty, len) + } + + pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> { + match t { + ty::FloatTy::F32 => self.type_f32(), + ty::FloatTy::F64 => self.type_f64(), + } + } +} + +impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn type_i1(&self) -> Type<'gcc> { + self.bool_type + } + + fn type_i8(&self) -> Type<'gcc> { + self.i8_type + } + + fn type_i16(&self) -> Type<'gcc> { + self.i16_type + } + + fn type_i32(&self) -> Type<'gcc> { + self.i32_type + } + + fn type_i64(&self) -> Type<'gcc> { + self.i64_type + } + + fn type_i128(&self) -> Type<'gcc> { + self.i128_type + } + + fn type_isize(&self) -> Type<'gcc> { + self.isize_type + } + + fn type_f32(&self) -> Type<'gcc> { + self.context.new_type::<f32>() + } + + fn type_f64(&self) -> Type<'gcc> { + self.context.new_type::<f64>() + } + + fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> { + self.context.new_function_pointer_type(None, return_type, params, false) + } + + fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> { + let types = fields.to_vec(); + if let Some(typ) = self.struct_types.borrow().get(fields) { + return typ.clone(); + } + let fields: Vec<_> = fields.iter().enumerate() + .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index))) + .collect(); + let typ = self.context.new_struct_type(None, "struct", &fields).as_type(); + if packed { + #[cfg(feature="master")] + typ.set_packed(); + } + self.struct_types.borrow_mut().insert(types, typ); + typ + } + + fn type_kind(&self, typ: Type<'gcc>) -> TypeKind { + if self.is_int_type_or_bool(typ) { + TypeKind::Integer + } + else if typ.is_compatible_with(self.float_type) { + TypeKind::Float + } + else if typ.is_compatible_with(self.double_type) { + TypeKind::Double + } + else if typ.is_vector() { + TypeKind::Vector + } + else { + // TODO(antoyo): support other types. + TypeKind::Void + } + } + + fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { + ty.make_pointer() + } + + fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { + // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? + ty.make_pointer() + } + + fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> { + if let Some(typ) = ty.dyncast_array() { + typ + } + else if let Some(vector_type) = ty.dyncast_vector() { + vector_type.get_element_type() + } + else if let Some(typ) = ty.get_pointee() { + typ + } + else { + unreachable!() + } + } + + fn vector_length(&self, _ty: Type<'gcc>) -> usize { + unimplemented!(); + } + + fn float_width(&self, typ: Type<'gcc>) -> usize { + let f32 = self.context.new_type::<f32>(); + let f64 = self.context.new_type::<f64>(); + if typ.is_compatible_with(f32) { + 32 + } + else if typ.is_compatible_with(f64) { + 64 + } + else { + panic!("Cannot get width of float type {:?}", typ); + } + // TODO(antoyo): support other sizes. + } + + fn int_width(&self, typ: Type<'gcc>) -> u64 { + self.gcc_int_width(typ) + } + + fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> { + value.get_type() + } + + fn type_array(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> { + // TODO: remove this as well? 
+ /*if let Some(struct_type) = ty.is_struct() { + if struct_type.get_field_count() == 0 { + // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a + // size of usize::MAX in test_binary_search, we workaround this by setting the size to + // zero for ZSTs. + // FIXME(antoyo): fix gccjit API. + len = 0; + } + }*/ + + self.context.new_array_type(None, ty, len) + } +} + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> { + let unit = Integer::approximate_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) { + let fields: Vec<_> = fields.iter().enumerate() + .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) + .collect(); + typ.set_fields(None, &fields); + if packed { + #[cfg(feature="master")] + typ.as_type().set_packed(); + } + } + + pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> { + self.context.new_opaque_struct_type(None, name) + } +} + +pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) { + let field_count = layout.fields.count(); + + let mut packed = false; + let mut offset = Size::ZERO; + let mut prev_effective_align = layout.align.abi; + let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); + for i in layout.fields.index_by_increasing_offset() { + let target_offset = layout.fields.offset(i as usize); + let field = layout.field(cx, i); + let effective_field_align = + layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset); + packed |= effective_field_align < field.align.abi; + + assert!(target_offset >= offset); + let padding = target_offset - offset; + let padding_align = prev_effective_align.min(effective_field_align); + assert_eq!(offset.align_to(padding_align) + padding, target_offset); + result.push(cx.type_padding_filler(padding, padding_align)); + + result.push(field.gcc_type(cx)); + offset = target_offset + field.size; + prev_effective_align = effective_field_align; + } + if layout.is_sized() && field_count > 0 { + if offset > layout.size { + bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); + } + let padding = layout.size - offset; + let padding_align = prev_effective_align; + assert_eq!(offset.align_to(padding_align) + padding, layout.size); + result.push(cx.type_padding_filler(padding, padding_align)); + assert_eq!(result.len(), 1 + field_count * 2); + } + + (result, packed) +} + +impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn set_type_metadata(&self, _function: RValue<'gcc>, _typeid: String) { + // Unsupported. + } + + fn typeid_metadata(&self, _typeid: String) -> RValue<'gcc> { + // Unsupported. + self.context.new_rvalue_from_int(self.int_type, 0) + } + + fn set_kcfi_type_metadata(&self, _function: RValue<'gcc>, _kcfi_typeid: u32) { + // Unsupported. 
+ } +} diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs new file mode 100644 index 00000000000..5df8c1a209d --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -0,0 +1,391 @@ +use std::fmt::Write; + +use gccjit::{Struct, Type}; +use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; +use rustc_middle::bug; +use rustc_middle::ty::{self, Ty, TypeVisitableExt}; +use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; +use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; + +use crate::abi::{FnAbiGccExt, GccType}; +use crate::context::CodegenCx; +use crate::type_::struct_fields; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> { + use Integer::*; + match i { + I8 => self.type_u8(), + I16 => self.type_u16(), + I32 => self.type_u32(), + I64 => self.type_u64(), + I128 => self.type_u128(), + } + } + + #[cfg(feature="master")] + pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> { + match t { + ty::IntTy::Isize => self.type_isize(), + ty::IntTy::I8 => self.type_i8(), + ty::IntTy::I16 => self.type_i16(), + ty::IntTy::I32 => self.type_i32(), + ty::IntTy::I64 => self.type_i64(), + ty::IntTy::I128 => self.type_i128(), + } + } + + #[cfg(feature="master")] + pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> { + match t { + ty::UintTy::Usize => self.type_isize(), + ty::UintTy::U8 => self.type_i8(), + ty::UintTy::U16 => self.type_i16(), + ty::UintTy::U32 => self.type_i32(), + ty::UintTy::U64 => self.type_i64(), + ty::UintTy::U128 => self.type_i128(), + } + } +} + +impl<'a, 'tcx> CodegenCx<'a, 'tcx> { + pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + self.layout_of(ty).align.abi + } +} + +fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { + match layout.abi { + Abi::Scalar(_) => bug!("handled elsewhere"), + Abi::Vector { ref element, count } => { + let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); + let element = + // NOTE: gcc doesn't allow pointer types in vectors. + if element.get_pointee().is_some() { + cx.usize_type + } + else { + element + }; + return cx.context.new_vector_type(element, count); + }, + Abi::ScalarPair(..) => { + return cx.type_struct( + &[ + layout.scalar_pair_element_gcc_type(cx, 0, false), + layout.scalar_pair_element_gcc_type(cx, 1, false), + ], + false, + ); + } + Abi::Uninhabited | Abi::Aggregate { .. } => {} + } + + let name = match layout.ty.kind() { + // FIXME(eddyb) producing readable type names for trait objects can result + // in problematically distinct types due to HRTB and subtyping (see #47638). + // ty::Dynamic(..) | + ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) 
| ty::Str + if !cx.sess().fewer_names() => + { + let mut name = with_no_trimmed_paths!(layout.ty.to_string()); + if let (&ty::Adt(def, _), &Variants::Single { index }) = + (layout.ty.kind(), &layout.variants) + { + if def.is_enum() && !def.variants().is_empty() { + write!(&mut name, "::{}", def.variant(index).name).unwrap(); + } + } + if let (&ty::Generator(_, _, _), &Variants::Single { index }) = + (layout.ty.kind(), &layout.variants) + { + write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap(); + } + Some(name) + } + ty::Adt(..) => { + // If `Some` is returned then a named struct is created in LLVM. Name collisions are + // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that + // can improve perf. + // FIXME(antoyo): I don't think that's true for libgccjit. + Some(String::new()) + } + _ => None, + }; + + match layout.fields { + FieldsShape::Primitive | FieldsShape::Union(_) => { + let fill = cx.type_padding_filler(layout.size, layout.align.abi); + let packed = false; + match name { + None => cx.type_struct(&[fill], packed), + Some(ref name) => { + let gcc_type = cx.type_named_struct(name); + cx.set_struct_body(gcc_type, &[fill], packed); + gcc_type.as_type() + }, + } + } + FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx), count), + FieldsShape::Arbitrary { .. } => + match name { + None => { + let (gcc_fields, packed) = struct_fields(cx, layout); + cx.type_struct(&gcc_fields, packed) + }, + Some(ref name) => { + let gcc_type = cx.type_named_struct(name); + *defer = Some((gcc_type, layout)); + gcc_type.as_type() + }, + }, + } +} + +pub trait LayoutGccExt<'tcx> { + fn is_gcc_immediate(&self) -> bool; + fn is_gcc_scalar_pair(&self) -> bool; + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; + fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; + fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>; + fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>; + fn gcc_field_index(&self, index: usize) -> u64; + fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>; +} + +impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { + fn is_gcc_immediate(&self) -> bool { + match self.abi { + Abi::Scalar(_) | Abi::Vector { .. } => true, + Abi::ScalarPair(..) => false, + Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(), + } + } + + fn is_gcc_scalar_pair(&self) -> bool { + match self.abi { + Abi::ScalarPair(..) => true, + Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false, + } + } + + /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`. + /// The pointee type of the pointer in `PlaceRef` is always this type. + /// For sized types, it is also the right LLVM type for an `alloca` + /// containing a value of that type, and most immediates (except `bool`). + /// Unsized types, however, are represented by a "minimal unit", e.g. + /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this + /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. + /// If the type is an unsized struct, the regular layout is generated, + /// with the inner-most trailing unsized field using the "minimal unit" + /// of that field's type - this is useful for taking the address of + /// that field and ensuring the struct has the right alignment. 
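The shapes this doc comment alludes to can be sanity-checked in plain Rust, independent of the backend: a reference to an unsized type is a (data pointer, metadata) pair, while the data pointer itself stays thin (the sizes below hold on typical 64-bit targets):

    use std::mem::size_of;

    fn main() {
        // &[u8] and &str carry (pointer, length): twice the size of a thin pointer.
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<&u8>());
        assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());
        // A thin reference stays pointer-sized.
        assert_eq!(size_of::<&u8>(), size_of::<usize>());
    }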
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if let Abi::Scalar(ref scalar) = self.abi { + // Use a different cache for scalars because pointers to DSTs + // can be either fat or thin (data pointers of fat pointers). + if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) { + return ty; + } + let ty = + match *self.ty.kind() { + ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { + cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx)) + } + ty::Adt(def, _) if def.is_box() => { + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx)) + } + ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())), + _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), + }; + cx.scalar_types.borrow_mut().insert(self.ty, ty); + return ty; + } + + // Check the cache. + let variant_index = + match self.variants { + Variants::Single { index } => Some(index), + _ => None, + }; + let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned(); + if let Some(ty) = cached_type { + return ty; + } + + assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty); + + // Make sure lifetimes are erased, to avoid generating distinct LLVM + // types for Rust types that only differ in the choice of lifetimes. + let normal_ty = cx.tcx.erase_regions(self.ty); + + let mut defer = None; + let ty = + if self.ty != normal_ty { + let mut layout = cx.layout_of(normal_ty); + if let Some(v) = variant_index { + layout = layout.for_variant(cx, v); + } + layout.gcc_type(cx) + } + else { + uncached_gcc_type(cx, *self, &mut defer) + }; + + cx.types.borrow_mut().insert((self.ty, variant_index), ty); + + if let Some((deferred_ty, layout)) = defer { + let (fields, packed) = struct_fields(cx, layout); + cx.set_struct_body(deferred_ty, &fields, packed); + } + + ty + } + + fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if let Abi::Scalar(ref scalar) = self.abi { + if scalar.is_bool() { + return cx.type_i1(); + } + } + self.gcc_type(cx) + } + + fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> { + match scalar.primitive() { + Int(i, true) => cx.type_from_integer(i), + Int(i, false) => cx.type_from_unsigned_integer(i), + F32 => cx.type_f32(), + F64 => cx.type_f64(), + Pointer(address_space) => { + // If we know the alignment, pick something better than i8. + let pointee = + if let Some(pointee) = self.pointee_info_at(cx, offset) { + cx.type_pointee_for_align(pointee.align) + } + else { + cx.type_i8() + }; + cx.type_ptr_to_ext(pointee, address_space) + } + } + } + + fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> { + // TODO(antoyo): remove llvm hack: + // HACK(eddyb) special-case fat pointers until LLVM removes + // pointee types, to avoid bitcasting every `OperandRef::deref`. + match self.ty.kind() { + ty::Ref(..) 
| ty::RawPtr(_) => { + return self.field(cx, index).gcc_type(cx); + } + // only wide pointer boxes are handled as pointers + // thin pointer boxes with scalar allocators are handled by the general logic below + ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => { + let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty()); + return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate); + } + _ => {} + } + + let (a, b) = match self.abi { + Abi::ScalarPair(ref a, ref b) => (a, b), + _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), + }; + let scalar = [a, b][index]; + + // Make sure to return the same type `immediate_gcc_type` would when + // dealing with an immediate pair. This means that `(bool, bool)` is + // effectively represented as `{i8, i8}` in memory and two `i1`s as an + // immediate, just like `bool` is typically `i8` in memory and only `i1` + // when immediate. We need to load/store `bool` as `i8` to avoid + // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. + // TODO(antoyo): this bugs certainly don't happen in this case since the bool type is used instead of i1. + if scalar.is_bool() { + return cx.type_i1(); + } + + let offset = + if index == 0 { + Size::ZERO + } + else { + a.size(cx).align_to(b.align(cx).abi) + }; + self.scalar_gcc_type_at(cx, scalar, offset) + } + + fn gcc_field_index(&self, index: usize) -> u64 { + match self.abi { + Abi::Scalar(_) | Abi::ScalarPair(..) => { + bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self) + } + _ => {} + } + match self.fields { + FieldsShape::Primitive | FieldsShape::Union(_) => { + bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self) + } + + FieldsShape::Array { .. } => index as u64, + + FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2, + } + } + + fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> { + if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) { + return pointee; + } + + let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); + + cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); + result + } +} + +impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> { + layout.gcc_type(self) + } + + fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> { + layout.immediate_gcc_type(self) + } + + fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool { + layout.is_gcc_immediate() + } + + fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool { + layout.is_gcc_scalar_pair() + } + + fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 { + layout.gcc_field_index(index) + } + + fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> { + layout.scalar_pair_element_gcc_type(self, index, immediate) + } + + fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> { + ty.gcc_type(self) + } + + fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { + fn_abi.ptr_to_gcc_type(self) + } + + fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> { + unimplemented!(); + } + + fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { + // FIXME(antoyo): return correct type. + self.type_void() + } +}  | 
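A closing note on `gcc_field_index` above: it maps a field to `1 + memory_index * 2` because `struct_fields` (in type_.rs) emits one padding filler before every field plus one trailing filler, so real fields occupy the odd slots. A toy model of that slot arithmetic (helper names hypothetical):

    // struct_fields produces [pad0, field0, pad1, field1, ..., tail_pad]:
    // 1 + 2 * field_count slots in total, matching its final assert.
    fn gcc_slot_count(field_count: u64) -> u64 {
        1 + field_count * 2
    }

    fn gcc_field_index(memory_index: u64) -> u64 {
        1 + memory_index * 2
    }

    fn main() {
        assert_eq!(gcc_slot_count(3), 7);
        assert_eq!(gcc_field_index(0), 1); // the first field sits after the leading filler
        assert_eq!(gcc_field_index(2), 5);
    }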
