Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
 compiler/rustc_codegen_gcc/src/abi.rs             |  276
 compiler/rustc_codegen_gcc/src/allocator.rs       |  160
 compiler/rustc_codegen_gcc/src/asm.rs             | 1012
 compiler/rustc_codegen_gcc/src/attributes.rs      |  122
 compiler/rustc_codegen_gcc/src/back/lto.rs        |  734
 compiler/rustc_codegen_gcc/src/back/mod.rs        |    2
 compiler/rustc_codegen_gcc/src/back/write.rs      |  282
 compiler/rustc_codegen_gcc/src/base.rs            |  273
 compiler/rustc_codegen_gcc/src/builder.rs         | 2478
 compiler/rustc_codegen_gcc/src/callee.rs          |  151
 compiler/rustc_codegen_gcc/src/common.rs          |  463
 compiler/rustc_codegen_gcc/src/consts.rs          |  421
 compiler/rustc_codegen_gcc/src/context.rs         |  604
 compiler/rustc_codegen_gcc/src/coverageinfo.rs    |   11
 compiler/rustc_codegen_gcc/src/debuginfo.rs       |  319
 compiler/rustc_codegen_gcc/src/declare.rs         |  281
 compiler/rustc_codegen_gcc/src/errors.rs          |   79
 compiler/rustc_codegen_gcc/src/gcc_util.rs        |  223
 compiler/rustc_codegen_gcc/src/int.rs             | 1038
 compiler/rustc_codegen_gcc/src/intrinsic/archs.rs | 9696
 compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs  | 1557
 compiler/rustc_codegen_gcc/src/intrinsic/mod.rs   | 1292
 compiler/rustc_codegen_gcc/src/intrinsic/simd.rs  | 1439
 compiler/rustc_codegen_gcc/src/lib.rs             |  527
 compiler/rustc_codegen_gcc/src/mono_item.rs       |   79
 compiler/rustc_codegen_gcc/src/type_.rs           |  386
 compiler/rustc_codegen_gcc/src/type_of.rs         |  381
27 files changed, 24286 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
new file mode 100644
index 00000000000..890a25e6a7c
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -0,0 +1,276 @@
+#[cfg(feature = "master")]
+use gccjit::FnAttribute;
+use gccjit::{ToLValue, ToRValue, Type};
+use rustc_abi::{Reg, RegKind};
+use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeCodegenMethods};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::bug;
+use rustc_middle::ty::Ty;
+use rustc_middle::ty::layout::LayoutOf;
+#[cfg(feature = "master")]
+use rustc_session::config;
+#[cfg(feature = "master")]
+use rustc_target::callconv::{ArgAttributes, CastTarget, Conv, FnAbi, PassMode};
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::intrinsic::ArgAbiExt;
+use crate::type_of::LayoutGccExt;
+
+impl AbiBuilderMethods for Builder<'_, '_, '_> {
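+    /// Fetch the `index`-th parameter of the current function. For parameters
+    /// passed on the stack ("byval"), gccjit hands us the value itself, so we
+    /// take its address to produce the pointer the rest of the codegen expects.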
+    fn get_param(&mut self, index: usize) -> Self::Value {
+        let func = self.current_func();
+        let param = func.get_param(index as i32);
+        let on_stack = if let Some(on_stack_param_indices) =
+            self.on_stack_function_params.borrow().get(&func)
+        {
+            on_stack_param_indices.contains(&index)
+        } else {
+            false
+        };
+        if on_stack { param.to_lvalue().get_address(None) } else { param.to_rvalue() }
+    }
+}
+
+impl GccType for CastTarget {
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+        let rest_gcc_unit = self.rest.unit.gcc_type(cx);
+        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
+            (0, 0)
+        } else {
+            (
+                self.rest.total.bytes() / self.rest.unit.size.bytes(),
+                self.rest.total.bytes() % self.rest.unit.size.bytes(),
+            )
+        };
+
+        if self.prefix.iter().all(|x| x.is_none()) {
+            // Simplify to a single unit when there is no prefix and size <= unit size
+            if self.rest.total <= self.rest.unit.size {
+                return rest_gcc_unit;
+            }
+
+            // Simplify to array when all chunks are the same size and type
+            if rem_bytes == 0 {
+                return cx.type_array(rest_gcc_unit, rest_count);
+            }
+        }
+
+        // Create list of fields in the main structure
+        let mut args: Vec<_> = self
+            .prefix
+            .iter()
+            .flat_map(|option_reg| option_reg.map(|reg| reg.gcc_type(cx)))
+            .chain((0..rest_count).map(|_| rest_gcc_unit))
+            .collect();
+
+        // Append final integer
+        if rem_bytes != 0 {
+            // Only integers can be really split further.
+            assert_eq!(self.rest.unit.kind, RegKind::Integer);
+            args.push(cx.type_ix(rem_bytes * 8));
+        }
+
+        cx.type_struct(&args, false)
+    }
+}
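+
+// Worked example for the `CastTarget` lowering above (illustrative, not upstream
+// code): with no prefix, an 8-byte integer unit and a 12-byte total, we get
+// rest_count = 1 and rem_bytes = 4, so the resulting GCC type is the struct
+// `{ i64, i32 }` (one full unit plus a final 4-byte integer).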
+
+pub trait GccType {
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>;
+}
+
+impl GccType for Reg {
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+        match self.kind {
+            RegKind::Integer => cx.type_ix(self.size.bits()),
+            RegKind::Float => match self.size.bits() {
+                32 => cx.type_f32(),
+                64 => cx.type_f64(),
+                _ => bug!("unsupported float: {:?}", self),
+            },
+            RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
+        }
+    }
+}
+
+pub struct FnAbiGcc<'gcc> {
+    pub return_type: Type<'gcc>,
+    pub arguments_type: Vec<Type<'gcc>>,
+    pub is_c_variadic: bool,
+    pub on_stack_param_indices: FxHashSet<usize>,
+    #[cfg(feature = "master")]
+    pub fn_attributes: Vec<FnAttribute<'gcc>>,
+}
+
+pub trait FnAbiGccExt<'gcc, 'tcx> {
+    // TODO(antoyo): return a function pointer type instead?
+    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc>;
+    fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    #[cfg(feature = "master")]
+    fn gcc_cconv(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Option<FnAttribute<'gcc>>;
+}
+
+impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc> {
+        let mut on_stack_param_indices = FxHashSet::default();
+
+        // This capacity calculation is approximate.
+        let mut argument_tys = Vec::with_capacity(
+            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
+        );
+
+        let return_type = match self.ret.mode {
+            PassMode::Ignore => cx.type_void(),
+            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+            PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
+            PassMode::Indirect { .. } => {
+                argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                cx.type_void()
+            }
+        };
+        #[cfg(feature = "master")]
+        let mut non_null_args = Vec::new();
+
+        #[cfg(feature = "master")]
+        let mut apply_attrs = |mut ty: Type<'gcc>, attrs: &ArgAttributes, arg_index: usize| {
+            if cx.sess().opts.optimize == config::OptLevel::No {
+                return ty;
+            }
+            if attrs.regular.contains(rustc_target::callconv::ArgAttribute::NoAlias) {
+                ty = ty.make_restrict()
+            }
+            if attrs.regular.contains(rustc_target::callconv::ArgAttribute::NonNull) {
+                non_null_args.push(arg_index as i32 + 1);
+            }
+            ty
+        };
+        #[cfg(not(feature = "master"))]
+        let apply_attrs = |ty: Type<'gcc>, _attrs: &ArgAttributes, _arg_index: usize| ty;
+
+        for arg in self.args.iter() {
+            let arg_ty = match arg.mode {
+                PassMode::Ignore => continue,
+                PassMode::Pair(a, b) => {
+                    let arg_pos = argument_tys.len();
+                    argument_tys.push(apply_attrs(
+                        arg.layout.scalar_pair_element_gcc_type(cx, 0),
+                        &a,
+                        arg_pos,
+                    ));
+                    argument_tys.push(apply_attrs(
+                        arg.layout.scalar_pair_element_gcc_type(cx, 1),
+                        &b,
+                        arg_pos + 1,
+                    ));
+                    continue;
+                }
+                PassMode::Cast { ref cast, pad_i32 } => {
+                    // add padding
+                    if pad_i32 {
+                        argument_tys.push(Reg::i32().gcc_type(cx));
+                    }
+                    let ty = cast.gcc_type(cx);
+                    apply_attrs(ty, &cast.attrs, argument_tys.len())
+                }
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: true } => {
+                    // This is a "byval" argument, so we don't apply the `restrict` attribute on it.
+                    on_stack_param_indices.insert(argument_tys.len());
+                    arg.memory_ty(cx)
+                }
+                PassMode::Direct(attrs) => {
+                    apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs, argument_tys.len())
+                }
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
+                    apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len())
+                }
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
+                    assert!(!on_stack);
+                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+                    // Any two ABI-compatible unsized types have the same metadata type and
+                    // moreover the same metadata value leads to the same dynamic size and
+                    // alignment, so this respects ABI compatibility.
+                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
+                    let ptr_layout = cx.layout_of(ptr_ty);
+                    let typ1 = ptr_layout.scalar_pair_element_gcc_type(cx, 0);
+                    let typ2 = ptr_layout.scalar_pair_element_gcc_type(cx, 1);
+                    argument_tys.push(apply_attrs(typ1, &attrs, argument_tys.len()));
+                    argument_tys.push(apply_attrs(typ2, &meta_attrs, argument_tys.len()));
+                    continue;
+                }
+            };
+            argument_tys.push(arg_ty);
+        }
+
+        #[cfg(feature = "master")]
+        let fn_attrs = if non_null_args.is_empty() {
+            Vec::new()
+        } else {
+            vec![FnAttribute::NonNull(non_null_args)]
+        };
+
+        FnAbiGcc {
+            return_type,
+            arguments_type: argument_tys,
+            is_c_variadic: self.c_variadic,
+            on_stack_param_indices,
+            #[cfg(feature = "master")]
+            fn_attributes: fn_attrs,
+        }
+    }
+
+    fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
+        let FnAbiGcc { return_type, arguments_type, is_c_variadic, on_stack_param_indices, .. } =
+            self.gcc_type(cx);
+        let pointer_type =
+            cx.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic);
+        cx.on_stack_params.borrow_mut().insert(
+            pointer_type.dyncast_function_ptr_type().expect("function ptr type"),
+            on_stack_param_indices,
+        );
+        pointer_type
+    }
+
+    #[cfg(feature = "master")]
+    fn gcc_cconv(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Option<FnAttribute<'gcc>> {
+        conv_to_fn_attribute(self.conv, &cx.tcx.sess.target.arch)
+    }
+}
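+
+// Sketch of what `gcc_type` above produces for a simple signature (illustrative):
+// for `extern "C" fn(i32) -> i32`, both return and argument use `PassMode::Direct`,
+// so the result is `FnAbiGcc { return_type: i32, arguments_type: vec![i32],
+// is_c_variadic: false, .. }` with no on-stack parameter indices.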
+
+#[cfg(feature = "master")]
+pub fn conv_to_fn_attribute<'gcc>(conv: Conv, arch: &str) -> Option<FnAttribute<'gcc>> {
+    // TODO: handle the calling conventions returning None.
+    let attribute = match conv {
+        Conv::C
+        | Conv::Rust
+        | Conv::CCmseNonSecureCall
+        | Conv::CCmseNonSecureEntry
+        | Conv::RiscvInterrupt { .. } => return None,
+        Conv::Cold => return None,
+        Conv::PreserveMost => return None,
+        Conv::PreserveAll => return None,
+        Conv::GpuKernel => {
+            // TODO(antoyo): remove clippy allow attribute when this is implemented.
+            #[allow(clippy::if_same_then_else)]
+            if arch == "amdgpu" {
+                return None;
+            } else if arch == "nvptx64" {
+                return None;
+            } else {
+                panic!("Architecture {} does not support GpuKernel calling convention", arch);
+            }
+        }
+        Conv::AvrInterrupt => return None,
+        Conv::AvrNonBlockingInterrupt => return None,
+        Conv::ArmAapcs => return None,
+        Conv::Msp430Intr => return None,
+        Conv::X86Fastcall => return None,
+        Conv::X86Intr => return None,
+        Conv::X86Stdcall => return None,
+        Conv::X86ThisCall => return None,
+        Conv::X86VectorCall => return None,
+        Conv::X86_64SysV => FnAttribute::SysvAbi,
+        Conv::X86_64Win64 => FnAttribute::MsAbi,
+    };
+    Some(attribute)
+}
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
new file mode 100644
index 00000000000..f4ebd42ee2d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -0,0 +1,160 @@
+use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type};
+#[cfg(feature = "master")]
+use gccjit::{FnAttribute, VarAttribute};
+use rustc_ast::expand::allocator::{
+    ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE,
+    alloc_error_handler_name, default_fn_name, global_fn_name,
+};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::OomStrategy;
+use rustc_symbol_mangling::mangle_internal_symbol;
+
+use crate::GccContext;
+#[cfg(feature = "master")]
+use crate::base::symbol_visibility_to_gcc;
+
+pub(crate) unsafe fn codegen(
+    tcx: TyCtxt<'_>,
+    mods: &mut GccContext,
+    _module_name: &str,
+    kind: AllocatorKind,
+    alloc_error_handler_kind: AllocatorKind,
+) {
+    let context = &mods.context;
+    let usize = match tcx.sess.target.pointer_width {
+        16 => context.new_type::<u16>(),
+        32 => context.new_type::<u32>(),
+        64 => context.new_type::<u64>(),
+        tws => bug!("Unsupported target word size for int: {}", tws),
+    };
+    let i8 = context.new_type::<i8>();
+    let i8p = i8.make_pointer();
+
+    if kind == AllocatorKind::Default {
+        for method in ALLOCATOR_METHODS {
+            let mut types = Vec::with_capacity(method.inputs.len());
+            for input in method.inputs.iter() {
+                match input.ty {
+                    AllocatorTy::Layout => {
+                        types.push(usize);
+                        types.push(usize);
+                    }
+                    AllocatorTy::Ptr => types.push(i8p),
+                    AllocatorTy::Usize => types.push(usize),
+
+                    AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+                }
+            }
+            let output = match method.output {
+                AllocatorTy::ResultPtr => Some(i8p),
+                AllocatorTy::Unit => None,
+
+                AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+                    panic!("invalid allocator output")
+                }
+            };
+            let from_name = mangle_internal_symbol(tcx, &global_fn_name(method.name));
+            let to_name = mangle_internal_symbol(tcx, &default_fn_name(method.name));
+
+            create_wrapper_function(tcx, context, &from_name, &to_name, &types, output);
+        }
+    }
+
+    // FIXME(bjorn3): Add noreturn attribute
+    create_wrapper_function(
+        tcx,
+        context,
+        &mangle_internal_symbol(tcx, "__rust_alloc_error_handler"),
+        &mangle_internal_symbol(tcx, alloc_error_handler_name(alloc_error_handler_kind)),
+        &[usize, usize],
+        None,
+    );
+
+    let name = mangle_internal_symbol(tcx, OomStrategy::SYMBOL);
+    let global = context.new_global(None, GlobalKind::Exported, i8, name);
+    #[cfg(feature = "master")]
+    global.add_attribute(VarAttribute::Visibility(symbol_visibility_to_gcc(
+        tcx.sess.default_visibility(),
+    )));
+    let value = tcx.sess.opts.unstable_opts.oom.should_panic();
+    let value = context.new_rvalue_from_int(i8, value as i32);
+    global.global_set_initializer_rvalue(value);
+
+    let name = mangle_internal_symbol(tcx, NO_ALLOC_SHIM_IS_UNSTABLE);
+    let global = context.new_global(None, GlobalKind::Exported, i8, name);
+    #[cfg(feature = "master")]
+    global.add_attribute(VarAttribute::Visibility(symbol_visibility_to_gcc(
+        tcx.sess.default_visibility(),
+    )));
+    let value = context.new_rvalue_from_int(i8, 0);
+    global.global_set_initializer_rvalue(value);
+}
+
+fn create_wrapper_function(
+    tcx: TyCtxt<'_>,
+    context: &Context<'_>,
+    from_name: &str,
+    to_name: &str,
+    types: &[Type<'_>],
+    output: Option<Type<'_>>,
+) {
+    let void = context.new_type::<()>();
+
+    let args: Vec<_> = types
+        .iter()
+        .enumerate()
+        .map(|(index, typ)| context.new_parameter(None, *typ, format!("param{}", index)))
+        .collect();
+    let func = context.new_function(
+        None,
+        FunctionType::Exported,
+        output.unwrap_or(void),
+        &args,
+        from_name,
+        false,
+    );
+
+    #[cfg(feature = "master")]
+    func.add_attribute(FnAttribute::Visibility(symbol_visibility_to_gcc(
+        tcx.sess.default_visibility(),
+    )));
+
+    if tcx.sess.must_emit_unwind_tables() {
+        // TODO(antoyo): emit unwind tables.
+    }
+
+    let args: Vec<_> = types
+        .iter()
+        .enumerate()
+        .map(|(index, typ)| context.new_parameter(None, *typ, format!("param{}", index)))
+        .collect();
+    let callee = context.new_function(
+        None,
+        FunctionType::Extern,
+        output.unwrap_or(void),
+        &args,
+        to_name,
+        false,
+    );
+    #[cfg(feature = "master")]
+    callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
+
+    let block = func.new_block("entry");
+
+    let args = args
+        .iter()
+        .enumerate()
+        .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+        .collect::<Vec<_>>();
+    let ret = context.new_call(None, callee, &args);
+    //llvm::LLVMSetTailCall(ret, True);
+    if output.is_some() {
+        block.end_with_return(None, ret);
+    } else {
+        block.end_with_void_return(None);
+    }
+
+    // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
+    // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
+}
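+
+// Roughly, for the default allocator kind this generates wrappers of the
+// following shape (illustrative pseudo-Rust; the actual symbol names go through
+// `mangle_internal_symbol`):
+//
+//     extern "C" fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
+//         __rdl_alloc(size, align)
+//     }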
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
new file mode 100644
index 00000000000..dbdf37ee6c9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -0,0 +1,1012 @@
+use std::borrow::Cow;
+
+use gccjit::{LValue, RValue, ToRValue, Type};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+    AsmBuilderMethods, AsmCodegenMethods, BaseTypeCodegenMethods, BuilderMethods,
+    GlobalAsmOperandRef, InlineAsmOperandRef,
+};
+use rustc_middle::bug;
+use rustc_middle::ty::Instance;
+use rustc_span::Span;
+use rustc_target::asm::*;
+
+use crate::builder::Builder;
+use crate::callee::get_fn;
+use crate::context::CodegenCx;
+use crate::errors::UnwindingInlineAsm;
+use crate::type_of::LayoutGccExt;
+
+// Rust asm! and GCC Extended Asm semantics differ substantially.
+//
+// 1. Rust asm operands go along as one list of operands. Operands themselves indicate
+//    if they're "in" or "out". "In" and "out" operands can interleave. One operand can be
+//    both "in" and "out" (`inout(reg)`).
+//
+//    GCC asm has two different lists for "in" and "out" operands. In terms of gccjit,
+//    this means that all "out" operands must go before "in" operands. "In" and "out" operands
+//    cannot interleave.
+//
+// 2. Operand lists in both Rust and GCC are indexed. Index starts from 0. Indexes are important
+//    because the asm template refers to operands by index.
+//
+//    Mapping from Rust to GCC index would be 1-1 if it wasn't for...
+//
+// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes.
+//    By contrast, Rust expresses clobbers through "out" operands that aren't tied to
+//    a variable (`_`), and such "clobbers" do have an index. Input operands cannot also
+//    be clobbered.
+//
+// 4. Furthermore, GCC Extended Asm does not support explicit register constraints
+//    (like `out("eax")`) directly, offering so-called "local register variables"
+//    as a workaround. These variables need to be declared and initialized *before*
+//    the Extended Asm block but *after* normal local variables
+//    (see comment in `codegen_inline_asm` for explanation).
+//
+// With that in mind, let's see how we translate Rust syntax to GCC
+// (from now on, `CC` stands for "constraint code"):
+//
+// * `out(reg_class) var`   -> translated to output operand: `"=CC"(var)`
+// * `inout(reg_class) var` -> translated to output operand: `"+CC"(var)`
+// * `in(reg_class) var`    -> translated to input operand: `"CC"(var)`
+//
+// * `out(reg_class) _` -> translated to one output operand: `"=CC"(tmp)`, where "tmp" is a temporary unused variable
+
+// * `out("explicit register") _` -> not translated to any operands; the register is simply added to the clobbers list
+//
+// * `inout(reg_class) in_var => out_var` -> translated to two operands:
+//                              output: `"=CC"(out_var)`
+//                              input:  `"num"(in_var)` where num is the GCC index
+//                                       of the corresponding output operand
+//
+// * `inout(reg_class) in_var => _` -> same as `inout(reg_class) in_var => tmp`,
+//                                      where "tmp" is a temporary unused variable
+//
+// * `out/in/inout("explicit register") var` -> translated to one or two operands as described above
+//                                              with `"r"(var)` constraint,
+//                                              and one register variable assigned to the desired register.
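+//
+// For example (illustrative), on x86-64 `asm!("add {0}, {1}", inout(reg) a, in(reg) b)`
+// roughly maps to the GCC Extended Asm `asm("add %0, %1" : "+r"(a) : "r"(b));`
+// per the table above.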
+
+const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
+const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
+
+struct AsmOutOperand<'a, 'tcx, 'gcc> {
+    rust_idx: usize,
+    constraint: &'a str,
+    late: bool,
+    readwrite: bool,
+
+    tmp_var: LValue<'gcc>,
+    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>,
+}
+
+struct AsmInOperand<'a, 'tcx> {
+    rust_idx: usize,
+    constraint: Cow<'a, str>,
+    val: RValue<'tcx>,
+}
+
+impl AsmOutOperand<'_, '_, '_> {
+    fn to_constraint(&self) -> String {
+        let mut res = String::with_capacity(self.constraint.len() + self.late as usize + 1);
+
+        let sign = if self.readwrite { '+' } else { '=' };
+        res.push(sign);
+        if !self.late {
+            res.push('&');
+        }
+
+        res.push_str(self.constraint);
+        res
+    }
+}
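+
+// Examples of the constraint strings built by `to_constraint` above (illustrative):
+// a late output with constraint "r" yields "=r"; an early-clobber output
+// (`late == false`) yields "=&r"; a late readwrite operand yields "+r".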
+
+enum ConstraintOrRegister {
+    Constraint(&'static str),
+    Register(&'static str),
+}
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn codegen_inline_asm(
+        &mut self,
+        template: &[InlineAsmTemplatePiece],
+        rust_operands: &[InlineAsmOperandRef<'tcx, Self>],
+        options: InlineAsmOptions,
+        span: &[Span],
+        instance: Instance<'_>,
+        dest: Option<Self::BasicBlock>,
+        _dest_catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>,
+    ) {
+        if options.contains(InlineAsmOptions::MAY_UNWIND) {
+            self.sess().dcx().create_err(UnwindingInlineAsm { span: span[0] }).emit();
+            return;
+        }
+
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+        let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+        let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+        // GCC index of an output operand equals its position in the array
+        let mut outputs = vec![];
+
+        // GCC index of an input operand equals its position in the array
+        // added to `outputs.len()`
+        let mut inputs = vec![];
+
+        // GCC index of a label equals its position in the array added to
+        // `outputs.len() + inputs.len()`.
+        let mut labels = vec![];
+
+        // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _`
+        let mut clobbers = vec![];
+
+        // We're trying to preallocate space for the template
+        let mut constants_len = 0;
+
+        // There are rules we must adhere to if we want GCC to do the right thing:
+        //
+        // * Every local variable that the asm block uses as an output must be declared *before*
+        //   the asm block.
+        // * There must be no instructions whatsoever between the register variables and the asm.
+        //
+        // Therefore, the backend must generate the instructions strictly in this order:
+        //
+        // 1. Output variables.
+        // 2. Register variables.
+        // 3. The asm block.
+        //
+        // We also must make sure that no input operands are emitted before output operands.
+        //
+        // This is why we work in passes, first emitting local vars, then local register vars.
+        // Also, we don't emit any asm operands immediately; we save them to
+        // one of the buffers to be emitted later.
+
+        let mut input_registers = vec![];
+
+        for op in rust_operands {
+            if let InlineAsmOperandRef::In { reg, .. } = *op {
+                if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                    input_registers.push(reg_name);
+                }
+            }
+        }
+
+        // 1. Normal variables (and saving operands to buffers).
+        for (rust_idx, op) in rust_operands.iter().enumerate() {
+            match *op {
+                InlineAsmOperandRef::Out { reg, late, place } => {
+                    use ConstraintOrRegister::*;
+
+                    let (constraint, ty) = match (reg_to_gcc(reg), place) {
+                        (Constraint(constraint), Some(place)) => {
+                            (constraint, place.layout.gcc_type(self.cx))
+                        }
+                        // When `reg` is a class and not an explicit register but the out place is not specified,
+                        // we need to create an unused output variable to assign the output to. This var
+                        // needs to be of a type that's "compatible" with the register class, but the
+                        // specific type doesn't matter.
+                        (Constraint(constraint), None) => {
+                            (constraint, dummy_output_type(self.cx, reg.reg_class()))
+                        }
+                        (Register(_), Some(_)) => {
+                            // left for the next pass
+                            continue;
+                        }
+                        (Register(reg_name), None) => {
+                            if input_registers.contains(&reg_name) {
+                                // the `clobber_abi` operand is converted into a series of
+                                // `lateout("reg") _` operands. Of course, a user could also
+                                // explicitly define such an output operand.
+                                //
+                                // GCC does not allow input registers to be clobbered, so if this out register
+                                // is also used as an in register, do not add it to the clobbers list;
+                                // it will be treated as a lateout register with `out_place: None`.
+                                if !late {
+                                    bug!("input registers can only be used as lateout registers");
+                                }
+                                ("r", dummy_output_type(self.cx, reg.reg_class()))
+                            } else {
+                                // `clobber_abi` can add lots of clobbers that are not supported by the target,
+                                // such as AVX-512 registers, so we just ignore unsupported registers
+                                let is_target_supported =
+                                    reg.reg_class().supported_types(asm_arch, true).iter().any(
+                                        |&(_, feature)| {
+                                            if let Some(feature) = feature {
+                                                self.tcx
+                                                    .asm_target_features(instance.def_id())
+                                                    .contains(&feature)
+                                            } else {
+                                                true // Register class is unconditionally supported
+                                            }
+                                        },
+                                    );
+
+                                if is_target_supported && !clobbers.contains(&reg_name) {
+                                    clobbers.push(reg_name);
+                                }
+                                continue;
+                            }
+                        }
+                    };
+
+                    let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                    outputs.push(AsmOutOperand {
+                        constraint,
+                        rust_idx,
+                        late,
+                        readwrite: false,
+                        tmp_var,
+                        out_place: place,
+                    });
+                }
+
+                InlineAsmOperandRef::In { reg, value } => {
+                    if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+                        inputs.push(AsmInOperand {
+                            constraint: Cow::Borrowed(constraint),
+                            rust_idx,
+                            val: value.immediate(),
+                        });
+                    } else {
+                        // left for the next pass
+                        continue;
+                    }
+                }
+
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+                    let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) else {
+                        // left for the next pass
+                        continue;
+                    };
+
+                    // Rustc frontend guarantees that input and output types are "compatible",
+                    // so we can just use input var's type for the output variable.
+                    //
+                    // This decision is also backed by the fact that LLVM needs in and out
+                    // values to be of *exactly the same type*, not just "compatible".
+                    // I'm not sure if GCC is as picky, but better safe than sorry.
+                    let ty = in_value.layout.gcc_type(self.cx);
+                    let tmp_var = self.current_func().new_local(None, ty, "output_register");
+
+                    // If the out_place is None (i.e `inout(reg) _` syntax was used), we translate
+                    // it to one "readwrite (+) output variable", otherwise we translate it to two
+                    // "out and tied in" vars as described above.
+                    let readwrite = out_place.is_none();
+                    outputs.push(AsmOutOperand {
+                        constraint,
+                        rust_idx,
+                        late,
+                        readwrite,
+                        tmp_var,
+                        out_place,
+                    });
+
+                    if !readwrite {
+                        let out_gcc_idx = outputs.len() - 1;
+                        let constraint = Cow::Owned(out_gcc_idx.to_string());
+
+                        inputs.push(AsmInOperand {
+                            constraint,
+                            rust_idx,
+                            val: in_value.immediate(),
+                        });
+                    }
+                }
+
+                InlineAsmOperandRef::Const { ref string } => {
+                    constants_len += string.len() + att_dialect as usize;
+                }
+
+                InlineAsmOperandRef::SymFn { instance } => {
+                    // TODO(@Amanieu): Additional mangling is needed on
+                    // some targets to add a leading underscore (Mach-O)
+                    // or byte count suffixes (x86 Windows).
+                    constants_len += self.tcx.symbol_name(instance).name.len();
+                }
+                InlineAsmOperandRef::SymStatic { def_id } => {
+                    // TODO(@Amanieu): Additional mangling is needed on
+                    // some targets to add a leading underscore (Mach-O).
+                    constants_len +=
+                        self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
+                }
+
+                InlineAsmOperandRef::Label { label } => {
+                    labels.push(label);
+                }
+            }
+        }
+
+        // 2. Register variables.
+        for (rust_idx, op) in rust_operands.iter().enumerate() {
+            match *op {
+                // `out("explicit register") var`
+                InlineAsmOperandRef::Out { reg, late, place } => {
+                    if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                        let out_place = if let Some(place) = place {
+                            place
+                        } else {
+                            // processed in the previous pass
+                            continue;
+                        };
+
+                        let ty = out_place.layout.gcc_type(self.cx);
+                        let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                        tmp_var.set_register_name(reg_name);
+
+                        outputs.push(AsmOutOperand {
+                            constraint: "r",
+                            rust_idx,
+                            late,
+                            readwrite: false,
+                            tmp_var,
+                            out_place: Some(out_place),
+                        });
+                    }
+
+                    // processed in the previous pass
+                }
+
+                // `in("explicit register") var`
+                InlineAsmOperandRef::In { reg, value } => {
+                    if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                        let ty = value.layout.gcc_type(self.cx);
+                        let reg_var = self.current_func().new_local(None, ty, "input_register");
+                        reg_var.set_register_name(reg_name);
+                        self.llbb().add_assignment(None, reg_var, value.immediate());
+
+                        inputs.push(AsmInOperand {
+                            constraint: "r".into(),
+                            rust_idx,
+                            val: reg_var.to_rvalue(),
+                        });
+                    }
+
+                    // processed in the previous pass
+                }
+
+                // `inout("explicit register") in_var => out_var`
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+                    if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                        // See explanation in the first pass.
+                        let ty = in_value.layout.gcc_type(self.cx);
+                        let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                        tmp_var.set_register_name(reg_name);
+
+                        outputs.push(AsmOutOperand {
+                            constraint: "r",
+                            rust_idx,
+                            late,
+                            readwrite: false,
+                            tmp_var,
+                            out_place,
+                        });
+
+                        let constraint = Cow::Owned((outputs.len() - 1).to_string());
+                        inputs.push(AsmInOperand {
+                            constraint,
+                            rust_idx,
+                            val: in_value.immediate(),
+                        });
+                    }
+
+                    // processed in the previous pass
+                }
+
+                InlineAsmOperandRef::SymFn { instance } => {
+                    inputs.push(AsmInOperand {
+                        constraint: "X".into(),
+                        rust_idx,
+                        val: get_fn(self.cx, instance).get_address(None),
+                    });
+                }
+
+                InlineAsmOperandRef::SymStatic { def_id } => {
+                    inputs.push(AsmInOperand {
+                        constraint: "X".into(),
+                        rust_idx,
+                        val: self.cx.get_static(def_id).get_address(None),
+                    });
+                }
+
+                InlineAsmOperandRef::Const { .. } => {
+                    // processed in the previous pass
+                }
+
+                InlineAsmOperandRef::Label { .. } => {
+                    // processed in the previous pass
+                }
+            }
+        }
+
+        // 3. Build the template string
+
+        let mut template_str =
+            String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
+        if att_dialect {
+            template_str.push_str(ATT_SYNTAX_INS);
+        }
+
+        for piece in template {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref string) => {
+                    for char in string.chars() {
+                        // TODO(antoyo): might also need to escape | if rustc doesn't do it.
+                        let escaped_char = match char {
+                            '%' => "%%",
+                            '{' => "%{",
+                            '}' => "%}",
+                            _ => {
+                                template_str.push(char);
+                                continue;
+                            }
+                        };
+                        template_str.push_str(escaped_char);
+                    }
+                }
+                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+                    let mut push_to_template = |modifier, gcc_idx| {
+                        use std::fmt::Write;
+
+                        template_str.push('%');
+                        if let Some(modifier) = modifier {
+                            template_str.push(modifier);
+                        }
+                        write!(template_str, "{}", gcc_idx).expect("pushing to string failed");
+                    };
+
+                    match rust_operands[operand_idx] {
+                        InlineAsmOperandRef::Out { reg, .. } => {
+                            let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+                            let gcc_index = outputs
+                                .iter()
+                                .position(|op| operand_idx == op.rust_idx)
+                                .expect("wrong rust index");
+                            push_to_template(modifier, gcc_index);
+                        }
+
+                        InlineAsmOperandRef::In { reg, .. } => {
+                            let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+                            let in_gcc_index = inputs
+                                .iter()
+                                .position(|op| operand_idx == op.rust_idx)
+                                .expect("wrong rust index");
+                            let gcc_index = in_gcc_index + outputs.len();
+                            push_to_template(modifier, gcc_index);
+                        }
+
+                        InlineAsmOperandRef::InOut { reg, .. } => {
+                            let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+
+                            // The input register is tied to the output, so we can just use the index of the output register
+                            let gcc_index = outputs
+                                .iter()
+                                .position(|op| operand_idx == op.rust_idx)
+                                .expect("wrong rust index");
+                            push_to_template(modifier, gcc_index);
+                        }
+
+                        InlineAsmOperandRef::SymFn { instance } => {
+                            // TODO(@Amanieu): Additional mangling is needed on
+                            // some targets to add a leading underscore (Mach-O)
+                            // or byte count suffixes (x86 Windows).
+                            let name = self.tcx.symbol_name(instance).name;
+                            template_str.push_str(name);
+                        }
+
+                        InlineAsmOperandRef::SymStatic { def_id } => {
+                            // TODO(@Amanieu): Additional mangling is needed on
+                            // some targets to add a leading underscore (Mach-O).
+                            let instance = Instance::mono(self.tcx, def_id);
+                            let name = self.tcx.symbol_name(instance).name;
+                            template_str.push_str(name);
+                        }
+
+                        InlineAsmOperandRef::Const { ref string } => {
+                            template_str.push_str(string);
+                        }
+
+                        InlineAsmOperandRef::Label { label } => {
+                            let label_gcc_index =
+                                labels.iter().position(|&l| l == label).expect("wrong rust index");
+                            let gcc_index = label_gcc_index + outputs.len() + inputs.len();
+                            push_to_template(Some('l'), gcc_index);
+                        }
+                    }
+                }
+            }
+        }
+
+        if att_dialect {
+            template_str.push_str(INTEL_SYNTAX_INS);
+        }
+
+        // 4. Generate Extended Asm block
+
+        let block = self.llbb();
+        let extended_asm = if let Some(dest) = dest {
+            assert!(!labels.is_empty());
+            block.end_with_extended_asm_goto(None, &template_str, &labels, Some(dest))
+        } else {
+            block.add_extended_asm(None, &template_str)
+        };
+
+        for op in &outputs {
+            extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var);
+        }
+
+        for op in &inputs {
+            extended_asm.add_input_operand(None, &op.constraint, op.val);
+        }
+
+        for clobber in clobbers.iter() {
+            extended_asm.add_clobber(clobber);
+        }
+
+        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+            // TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient
+            // on all architectures. For instance, what about FP stack?
+            extended_asm.add_clobber("cc");
+        }
+        if !options.contains(InlineAsmOptions::NOMEM) {
+            extended_asm.add_clobber("memory");
+        }
+        if !options.contains(InlineAsmOptions::PURE) {
+            extended_asm.set_volatile_flag(true);
+        }
+        if !options.contains(InlineAsmOptions::NOSTACK) {
+            // TODO(@Commeownist): figure out how to align stack
+        }
+        if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) {
+            let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
+            let builtin_unreachable: RValue<'gcc> =
+                unsafe { std::mem::transmute(builtin_unreachable) };
+            self.call(self.type_void(), None, None, builtin_unreachable, &[], None, None);
+        }
+
+        // Write results to outputs.
+        //
+        // We need to do this because:
+        //  1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases
+        //     (especially with current `rustc_backend_ssa` API).
+        //  2. Not every output operand has an `out_place`, and it's required by `add_output_operand`.
+        //
+        // Instead, we generate a temporary output variable for each output operand, and then this loop
+        // generates `out_place = tmp_var;` assignments if `out_place` exists.
+        for op in &outputs {
+            if let Some(place) = op.out_place {
+                OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);
+            }
+        }
+    }
+}
+
+fn estimate_template_length(
+    template: &[InlineAsmTemplatePiece],
+    constants_len: usize,
+    att_dialect: bool,
+) -> usize {
+    let len: usize = template
+        .iter()
+        .map(|piece| {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref string) => string.len(),
+                InlineAsmTemplatePiece::Placeholder { .. } => {
+                    // '%' + 1 char modifier + 1 char index
+                    3
+                }
+            }
+        })
+        .sum();
+
+    // Increase it by 5% to account for possible '%' signs that'll be duplicated.
+    // The number was pulled out of the blue, but it should be a fair enough
+    // upper bound.
+    let mut res = (len as f32 * 1.05) as usize + constants_len;
+
+    if att_dialect {
+        res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len();
+    }
+    res
+}
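+
+// Worked example (illustrative): a template with one 10-character string piece
+// and two placeholders is estimated at (10 + 2 * 3) * 1.05 ≈ 16 bytes, plus
+// `constants_len` and, under AT&T syntax, the two syntax-switch directives.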
+
+/// Converts a register class to a GCC constraint code.
+fn reg_to_gcc(reg_or_reg_class: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+    match reg_or_reg_class {
+        InlineAsmRegOrRegClass::Reg(reg) => {
+            ConstraintOrRegister::Register(explicit_reg_to_gcc(reg))
+        }
+        InlineAsmRegOrRegClass::RegClass(reg_class) => {
+            ConstraintOrRegister::Constraint(reg_class_to_gcc(reg_class))
+        }
+    }
+}
+
+fn explicit_reg_to_gcc(reg: InlineAsmReg) -> &'static str {
+    // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+    match reg {
+        InlineAsmReg::X86(reg) => {
+            // TODO(antoyo): add support for vector register.
+            match reg.reg_class() {
+                X86InlineAsmRegClass::reg_byte => {
+                    // GCC does not support the `b` suffix, so we just strip it
+                    // see https://github.com/rust-lang/rustc_codegen_gcc/issues/485
+                    reg.name().trim_end_matches('b')
+                }
+                _ => match reg.name() {
+                    // Some register names do not map 1:1 from Rust to GCC.
+                    "st(0)" => "st",
+
+                    name => name,
+                },
+            }
+        }
+
+        _ => unimplemented!(),
+    }
+}
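+
+// Examples (illustrative): the byte register `r8b` becomes `r8` (the `b` suffix
+// is stripped), `st(0)` becomes `st`, and most other names pass through as-is.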
+
+/// The GCC constraint codes can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+fn reg_class_to_gcc(reg_class: InlineAsmRegClass) -> &'static str {
+    match reg_class {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
+        // https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for
+        // "define_constraint".
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::kreg0
+            | X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::tmm_reg,
+        ) => unreachable!("clobber-only"),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("GCC backend does not support SPIR-V")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg) => "v",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::areg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
+    match reg {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            cx.type_vector(cx.type_i64(), 2)
+        }
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+            cx.type_vector(cx.type_i64(), 2)
+        }
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => {
+            cx.type_vector(cx.type_i32(), 4)
+        }
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::tmm_reg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
+        InlineAsmRegClass::S390x(
+            S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
+        ) => cx.type_i32(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg) => cx.type_vector(cx.type_i64(), 2),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::areg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("GCC backend does not support SPIR-V")
+        }
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
+
+impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn codegen_global_asm(
+        &self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[GlobalAsmOperandRef<'tcx>],
+        options: InlineAsmOptions,
+        _line_spans: &[Span],
+    ) {
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+        // Rust inline asm defaults to Intel syntax on x86, so AT&T is only used when requested.
+        let att_dialect = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+            && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+        // Build the template string
+        let mut template_str = ".pushsection .text\n".to_owned();
+        if att_dialect {
+            template_str.push_str(".att_syntax\n");
+        }
+        for piece in template {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref string) => {
+                    let mut index = 0;
+                    while index < string.len() {
+                        // NOTE: GCC does not allow inline comments, so remove them.
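+                        // For example (illustrative), "nop // do nothing\nret"
+                        // becomes "nop\nret": everything from "//" to the end
+                        // of the line is dropped.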
+                        let comment_index = string[index..]
+                            .find("//")
+                            .map(|comment_index| comment_index + index)
+                            .unwrap_or(string.len());
+                        template_str.push_str(&string[index..comment_index]);
+                        index = string[comment_index..]
+                            .find('\n')
+                            .map(|index| index + comment_index)
+                            .unwrap_or(string.len());
+                    }
+                }
+                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+                    match operands[operand_idx] {
+                        GlobalAsmOperandRef::Const { ref string } => {
+                            // Const operands get injected directly into the
+                            // template. Note that we don't need to escape `%`
+                            // here, unlike in normal inline assembly.
+                            template_str.push_str(string);
+                        }
+
+                        GlobalAsmOperandRef::SymFn { instance } => {
+                            let function = get_fn(self, instance);
+                            self.add_used_function(function);
+                            // TODO(@Amanieu): Additional mangling is needed on
+                            // some targets to add a leading underscore (Mach-O)
+                            // or byte count suffixes (x86 Windows).
+                            let name = self.tcx.symbol_name(instance).name;
+                            template_str.push_str(name);
+                        }
+
+                        GlobalAsmOperandRef::SymStatic { def_id } => {
+                            // TODO(antoyo): set the global variable as used.
+                            // TODO(@Amanieu): Additional mangling is needed on
+                            // some targets to add a leading underscore (Mach-O).
+                            let instance = Instance::mono(self.tcx, def_id);
+                            let name = self.tcx.symbol_name(instance).name;
+                            template_str.push_str(name);
+                        }
+                    }
+                }
+            }
+        }
+
+        if att_dialect {
+            template_str.push_str("\n\t.intel_syntax noprefix");
+        }
+        // NOTE: it seems like GCC will put the asm in the wrong section, so set it to .text manually.
+        template_str.push_str("\n.popsection");
+        self.context.add_top_level_asm(None, &template_str);
+    }
+
+    fn mangled_name(&self, instance: Instance<'tcx>) -> String {
+        // TODO(@Amanieu): Additional mangling is needed on
+        // some targets to add a leading underscore (Mach-O)
+        // or byte count suffixes (x86 Windows).
+        self.tcx.symbol_name(instance).name.to_string()
+    }
+}
+
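+/// Converts a rustc inline asm template modifier into the corresponding GCC
+/// operand modifier. For example (illustrative), on x86_64 a `reg` operand with
+/// no modifier maps to `q`, which prints the full 64-bit register name.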
+fn modifier_to_gcc(
+    arch: InlineAsmArch,
+    reg: InlineAsmRegClass,
+    modifier: Option<char>,
+) -> Option<char> {
+    // The modifiers can be retrieved from
+    // https://gcc.gnu.org/onlinedocs/gcc/Modifiers.html#Modifiers
+    match reg {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            if modifier == Some('v') { None } else { modifier }
+        }
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+            if modifier.is_none() {
+                Some('q')
+            } else {
+                modifier
+            }
+        }
+        InlineAsmRegClass::Hexagon(_) => None,
+        InlineAsmRegClass::LoongArch(_) => None,
+        InlineAsmRegClass::Mips(_) => None,
+        InlineAsmRegClass::Nvptx(_) => None,
+        InlineAsmRegClass::PowerPC(_) => None,
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+            None => {
+                if arch == InlineAsmArch::X86_64 {
+                    Some('q')
+                } else {
+                    Some('k')
+                }
+            }
+            Some('l') => Some('b'),
+            Some('h') => Some('h'),
+            Some('x') => Some('w'),
+            Some('e') => Some('k'),
+            Some('r') => Some('q'),
+            _ => unreachable!(),
+        },
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+            (_, Some('x')) => Some('x'),
+            (_, Some('y')) => Some('t'),
+            (_, Some('z')) => Some('g'),
+            _ => unreachable!(),
+        },
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::kreg0
+            | X86InlineAsmRegClass::tmm_reg,
+        ) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
+        InlineAsmRegClass::Bpf(_) => None,
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
+        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
+        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
+            Some('h') => Some('B'),
+            Some('l') => Some('A'),
+            _ => None,
+        },
+        InlineAsmRegClass::Avr(_) => None,
+        InlineAsmRegClass::S390x(_) => None,
+        InlineAsmRegClass::Sparc(_) => None,
+        InlineAsmRegClass::Msp430(_) => None,
+        InlineAsmRegClass::M68k(_) => None,
+        InlineAsmRegClass::CSKY(_) => None,
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        }
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs
new file mode 100644
index 00000000000..69b04dd5796
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/attributes.rs
@@ -0,0 +1,122 @@
+#[cfg(feature = "master")]
+use gccjit::FnAttribute;
+use gccjit::Function;
+#[cfg(feature = "master")]
+use rustc_attr_parsing::InlineAttr;
+use rustc_attr_parsing::InstructionSetAttr;
+#[cfg(feature = "master")]
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty;
+
+use crate::context::CodegenCx;
+use crate::gcc_util::to_gcc_features;
+
+/// Get the GCC attribute for the provided inline heuristic.
+#[cfg(feature = "master")]
+#[inline]
+fn inline_attr<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    inline: InlineAttr,
+) -> Option<FnAttribute<'gcc>> {
+    match inline {
+        InlineAttr::Hint => Some(FnAttribute::Inline),
+        InlineAttr::Always | InlineAttr::Force { .. } => Some(FnAttribute::AlwaysInline),
+        InlineAttr::Never => {
+            if cx.sess().target.arch != "amdgpu" {
+                Some(FnAttribute::NoInline)
+            } else {
+                None
+            }
+        }
+        InlineAttr::None => None,
+    }
+}
+
+/// Composite function which sets GCC attributes for a function depending on its AST
+/// (`#[attribute]`) attributes.
+pub fn from_fn_attrs<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))] func: Function<'gcc>,
+    instance: ty::Instance<'tcx>,
+) {
+    let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
+
+    #[cfg(feature = "master")]
+    {
+        let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+            InlineAttr::Never
+        } else if codegen_fn_attrs.inline == InlineAttr::None
+            && instance.def.requires_inline(cx.tcx)
+        {
+            InlineAttr::Hint
+        } else {
+            codegen_fn_attrs.inline
+        };
+        if let Some(attr) = inline_attr(cx, inline) {
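+            // NOTE: GCC documents `always_inline` in terms of functions that are also
+            // declared `inline`, which is presumably why both attributes are added here.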
+            if let FnAttribute::AlwaysInline = attr {
+                func.add_attribute(FnAttribute::Inline);
+            }
+            func.add_attribute(attr);
+        }
+
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+            func.add_attribute(FnAttribute::Cold);
+        }
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
+            func.add_attribute(FnAttribute::Pure);
+        }
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
+            func.add_attribute(FnAttribute::Const);
+        }
+    }
+
+    let mut function_features = codegen_fn_attrs
+        .target_features
+        .iter()
+        .map(|features| features.name.as_str())
+        .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter())
+        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match *x {
+            InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature.
+            InstructionSetAttr::ArmT32 => "thumb-mode",
+        }))
+        .collect::<Vec<_>>();
+
+    // TODO(antoyo): cg_llvm adds global features to each function so that LTO keeps them.
+    // Check if GCC requires the same.
+    let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
+    function_features.extend(&mut global_features);
+    let target_features = function_features
+        .iter()
+        .filter_map(|feature| {
+            // FIXME(antoyo): for some reason, disabling SSE results in the following error when
+            // compiling Rust for Linux:
+            // SSE register return with SSE disabled
+            // TODO(antoyo): support soft-float and retpoline-external-thunk.
+            if feature.contains("soft-float")
+                || feature.contains("retpoline-external-thunk")
+                || *feature == "-sse"
+            {
+                return None;
+            }
+
+            if feature.starts_with('-') {
+                Some(format!("no{}", feature))
+            } else if let Some(stripped) = feature.strip_prefix('+') {
+                Some(stripped.to_string())
+            } else {
+                Some(feature.to_string())
+            }
+        })
+        .collect::<Vec<_>>()
+        .join(",");
+    if !target_features.is_empty() {
+        #[cfg(feature = "master")]
+        match cx.sess().target.arch.as_ref() {
+            "x86" | "x86_64" | "powerpc" => {
+                func.add_attribute(FnAttribute::Target(&target_features))
+            }
+            // The target attribute is not supported on other targets in GCC.
+            _ => (),
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
new file mode 100644
index 00000000000..e5221c7da31
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -0,0 +1,734 @@
+/// GCC requires using the same toolchain for the whole compilation when doing LTO, so we
+/// need the same version/commit of the linker (gcc) and of the LTO front-end binaries (lto1,
+/// lto-wrapper, liblto_plugin.so).
+// FIXME(antoyo): the executables compiled with LTO are bigger than those compiled without LTO.
+// Since it is the opposite for cg_llvm, check if this is normal.
+//
+// Maybe we embed the bitcode in the final binary?
+// It doesn't look like we try to generate fat objects for the final binary.
+// Check if the way we combine the object files makes it keep the LTO sections on the final link.
+// Maybe that's because the combined object files contain the IR (true) and the final link
+// does not remove it?
+//
+// TODO(antoyo): for performance, check which optimizations the C++ frontend enables.
+//
+// Fix these warnings:
+// /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
+// /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
+// /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization
+use std::ffi::{CStr, CString};
+use std::fs::{self, File};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use gccjit::{Context, OutputKind};
+use object::read::archive::ArchiveFile;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
+use rustc_data_structures::memmap::Mmap;
+use rustc_errors::{DiagCtxtHandle, FatalError};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_session::config::{CrateType, Lto};
+use rustc_target::spec::RelocModel;
+use tempfile::{TempDir, tempdir};
+
+use crate::back::write::save_temp_bitcode;
+use crate::errors::{DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib};
+use crate::{GccCodegenBackend, GccContext, SyncContext, to_gcc_opt_level};
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+    match crate_type {
+        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
+        CrateType::Rlib | CrateType::ProcMacro => false,
+    }
+}
+
+struct LtoData {
+    // TODO(antoyo): use symbols_below_threshold.
+    //symbols_below_threshold: Vec<String>,
+    upstream_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+    tmp_path: TempDir,
+}
+
+fn prepare_lto(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    dcx: DiagCtxtHandle<'_>,
+) -> Result<LtoData, FatalError> {
+    let export_threshold = match cgcx.lto {
+        // We're just doing LTO for our one crate
+        Lto::ThinLocal => SymbolExportLevel::Rust,
+
+        // We're doing LTO for the entire crate graph
+        Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+        Lto::No => panic!("didn't request LTO but we're doing LTO"),
+    };
+
+    let tmp_path = match tempdir() {
+        Ok(tmp_path) => tmp_path,
+        Err(error) => {
+            eprintln!("Cannot create temporary directory: {}", error);
+            return Err(FatalError);
+        }
+    };
+
+    let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+        if info.level.is_below_threshold(export_threshold) || info.used {
+            Some(name.clone())
+        } else {
+            None
+        }
+    };
+    let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+    let mut symbols_below_threshold = {
+        let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
+        exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<String>>()
+    };
+    info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+    // If we're performing LTO for the entire crate graph, then for each of our
+    // upstream dependencies, find the corresponding rlib and load the bitcode
+    // from the archive.
+    //
+    // We save off all the bytecode and GCC module file paths for later processing
+    // with either fat or thin LTO
+    let mut upstream_modules = Vec::new();
+    if cgcx.lto != Lto::ThinLocal {
+        // Make sure we actually can run LTO
+        for crate_type in cgcx.crate_types.iter() {
+            if !crate_type_allows_lto(*crate_type) {
+                dcx.emit_err(LtoDisallowed);
+                return Err(FatalError);
+            }
+            if *crate_type == CrateType::Dylib && !cgcx.opts.unstable_opts.dylib_lto {
+                dcx.emit_err(LtoDylib);
+                return Err(FatalError);
+            }
+        }
+
+        if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+            dcx.emit_err(DynamicLinkingWithLTO);
+            return Err(FatalError);
+        }
+
+        for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+            let exported_symbols =
+                cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+            {
+                let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
+                symbols_below_threshold
+                    .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+            }
+
+            let archive_data = unsafe {
+                Mmap::map(File::open(path).expect("couldn't open rlib")).expect("couldn't map rlib")
+            };
+            let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
+            let obj_files = archive
+                .members()
+                .filter_map(|child| {
+                    child.ok().and_then(|c| {
+                        std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
+                    })
+                })
+                .filter(|&(name, _)| looks_like_rust_object_file(name));
+            for (name, child) in obj_files {
+                info!("adding bitcode from {}", name);
+                let path = tmp_path.path().join(name);
+                match save_as_file(child.data(&*archive_data).expect("corrupt rlib"), &path) {
+                    Ok(()) => {
+                        let buffer = ModuleBuffer::new(path);
+                        let module = SerializedModule::Local(buffer);
+                        upstream_modules.push((module, CString::new(name).unwrap()));
+                    }
+                    Err(e) => {
+                        dcx.emit_err(e);
+                        return Err(FatalError);
+                    }
+                }
+            }
+        }
+    }
+
+    Ok(LtoData { upstream_modules, tmp_path })
+}
+
+fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
+    fs::write(path, obj).map_err(|error| LtoBitcodeFromRlib {
+        gcc_err: format!("write object file to temp dir: {}", error),
+    })
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    modules: Vec<FatLtoInput<GccCodegenBackend>>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
+    let dcx = cgcx.create_dcx();
+    let dcx = dcx.handle();
+    let lto_data = prepare_lto(cgcx, dcx)?;
+    /*let symbols_below_threshold =
+    lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
+    fat_lto(
+        cgcx,
+        dcx,
+        modules,
+        cached_modules,
+        lto_data.upstream_modules,
+        lto_data.tmp_path,
+        //&lto_data.symbols_below_threshold,
+    )
+}
+
+fn fat_lto(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    _dcx: DiagCtxtHandle<'_>,
+    modules: Vec<FatLtoInput<GccCodegenBackend>>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+    mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+    tmp_path: TempDir,
+    //symbols_below_threshold: &[String],
+) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
+    let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module");
+    info!("going for a fat lto");
+
+    // Sort out all our lists of incoming modules into two lists.
+    //
+    // * `serialized_modules` (also an argument to this function) contains all
+    //   modules that are serialized in-memory.
+    // * `in_memory` contains modules which are already parsed and in-memory,
+    //   such as from multi-CGU builds.
+    //
+    // All of `cached_modules` (cached from previous incremental builds) can
+    // immediately go onto the `serialized_modules` modules list and then we can
+    // split the `modules` array into these two lists.
+    let mut in_memory = Vec::new();
+    serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+        info!("pushing cached module {:?}", wp.cgu_name);
+        (buffer, CString::new(wp.cgu_name).unwrap())
+    }));
+    for module in modules {
+        match module {
+            FatLtoInput::InMemory(m) => in_memory.push(m),
+            FatLtoInput::Serialized { name, buffer } => {
+                info!("pushing serialized module {:?}", name);
+                let buffer = SerializedModule::Local(buffer);
+                serialized_modules.push((buffer, CString::new(name).unwrap()));
+            }
+        }
+    }
+
+    // Find the "costliest" module and merge everything into that codegen unit.
+    // All the other modules will be serialized and reparsed into the new
+    // context, so this hopefully avoids serializing and parsing the largest
+    // codegen unit.
+    //
+    // Additionally use a regular module as the base here to ensure that various
+    // file copy operations in the backend work correctly. The only other kind
+    // of module here should be an allocator one, and if your crate is smaller
+    // than the allocator module then the size doesn't really matter anyway.
+    let costliest_module = in_memory
+        .iter()
+        .enumerate()
+        .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+        .map(|(i, _module)| {
+            //let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+            // TODO(antoyo): compute the cost of a module if GCC allows this.
+            (0, i)
+        })
+        .max();
+
+    // If we found a costliest module, we're good to go. Otherwise all our
+    // inputs were serialized which could happen in the case, for example, that
+    // all our inputs were incrementally reread from the cache and we're just
+    // re-executing the LTO passes. If that's the case, deserialize the first
+    // module and create a linker with it.
+    let mut module: ModuleCodegen<GccContext> = match costliest_module {
+        Some((_cost, i)) => in_memory.remove(i),
+        None => {
+            unimplemented!("Incremental");
+            /*assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+            let (buffer, name) = serialized_modules.remove(0);
+            info!("no in-memory regular modules to choose from, parsing {:?}", name);
+            ModuleCodegen {
+                module_llvm: GccContext::parse(cgcx, &name, buffer.data(), dcx)?,
+                name: name.into_string().unwrap(),
+                kind: ModuleKind::Regular,
+            }*/
+        }
+    };
+    {
+        info!("using {:?} as a base module", module.name);
+
+        // We cannot load and merge GCC contexts in memory like cg_llvm is doing.
+        // Instead, we combine the object files into a single object file.
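+        // The actual combination happens later, in back/write.rs; it is roughly
+        // equivalent to (illustrative): gcc -Wl,-r -nostdlib module1.o module2.o -o combined.o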
+        for module in in_memory {
+            let path = tmp_path.path().to_path_buf().join(&module.name);
+            let path = path.to_str().expect("path");
+            let context = &module.module_llvm.context;
+            let config = cgcx.config(module.kind);
+            // NOTE: we need to set the optimization level here in order for LTO to do its job.
+            context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+            context.add_command_line_option("-flto=auto");
+            context.add_command_line_option("-flto-partition=one");
+            context.compile_to_file(OutputKind::ObjectFile, path);
+            let buffer = ModuleBuffer::new(PathBuf::from(path));
+            let llmod_id = CString::new(&module.name[..]).unwrap();
+            serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+        }
+        // Sort the modules to ensure we produce deterministic results.
+        serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+        // We add the object files and record in `should_combine_object_files` that they
+        // should be combined into a single object file later, at compile time.
+        for (bc_decoded, name) in serialized_modules {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg_recorder("GCC_fat_lto_link_module", |recorder| {
+                    recorder.record_arg(format!("{:?}", name))
+                });
+            info!("linking {:?}", name);
+            match bc_decoded {
+                SerializedModule::Local(ref module_buffer) => {
+                    module.module_llvm.should_combine_object_files = true;
+                    module
+                        .module_llvm
+                        .context
+                        .add_driver_option(module_buffer.0.to_str().expect("path"));
+                }
+                SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
+                SerializedModule::FromUncompressedFile(_) => {
+                    unimplemented!("from uncompressed file")
+                }
+            }
+        }
+        save_temp_bitcode(cgcx, &module, "lto.input");
+
+        // Internalize everything below threshold to help strip out more modules and such.
+        /*unsafe {
+        let ptr = symbols_below_threshold.as_ptr();
+        llvm::LLVMRustRunRestrictionPass(
+            llmod,
+            ptr as *const *const libc::c_char,
+            symbols_below_threshold.len() as libc::size_t,
+        );*/
+
+        save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+        //}
+    }
+
+    // NOTE: save the temporary directory used by LTO so that it gets deleted after linking instead
+    // of now.
+    module.module_llvm.temp_dir = Some(tmp_path);
+
+    Ok(LtoModuleCodegen::Fat(module))
+}
+
+pub struct ModuleBuffer(PathBuf);
+
+impl ModuleBuffer {
+    pub fn new(path: PathBuf) -> ModuleBuffer {
+        ModuleBuffer(path)
+    }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
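+    // NOTE: a GCC module's contents live in its context and in object files on disk
+    // rather than in an in-memory byte buffer, so there are no bytes to return here.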
+    fn data(&self) -> &[u8] {
+        &[]
+    }
+}
+
+/// Performs thin LTO by running the necessary global analysis and returning two
+/// lists: one of the modules that need optimization, and another of the modules
+/// that can simply be copied over from the incremental compilation cache.
+pub(crate) fn run_thin(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    modules: Vec<(String, ThinBuffer)>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+    let dcx = cgcx.create_dcx();
+    let dcx = dcx.handle();
+    let lto_data = prepare_lto(cgcx, dcx)?;
+    if cgcx.opts.cg.linker_plugin_lto.enabled() {
+        unreachable!(
+            "We should never reach this case if the LTO step \
+                      is deferred to the linker"
+        );
+    }
+    thin_lto(
+        cgcx,
+        dcx,
+        modules,
+        lto_data.upstream_modules,
+        lto_data.tmp_path,
+        cached_modules,
+        //&lto_data.symbols_below_threshold,
+    )
+}
+
+pub(crate) fn prepare_thin(
+    module: ModuleCodegen<GccContext>,
+    _emit_summary: bool,
+) -> (String, ThinBuffer) {
+    let name = module.name;
+    //let buffer = ThinBuffer::new(module.module_llvm.context, true, emit_summary);
+    let buffer = ThinBuffer::new(&module.module_llvm.context);
+    (name, buffer)
+}
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoids this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+///    1. Prepare a "summary" of each LLVM module in question which describes
+///       the values inside, cost of the values, etc.
+///    2. Merge the summaries of all modules in question into one "index"
+///    3. Perform some global analysis on this index
+///    4. For each module, use the index and analysis calculated previously to
+///       perform local transformations on the module, for example inlining
+///       small functions from other modules.
+///    5. Run thin-specific optimization passes over each module, and then code
+///       generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example, one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, the function here is designed specifically to just
+/// calculate the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
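+///
+/// A minimal sketch of that flow (illustrative pseudocode, not real API calls):
+///
+/// ```text
+/// let summaries = modules.iter().map(summarize);    // step 1
+/// let index = analyze(merge(summaries));            // steps 2-3
+/// for module in modules {
+///     import_and_transform(module, &index);         // step 4
+///     optimize_and_codegen(module);                 // step 5
+/// }
+/// ```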
+fn thin_lto(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    _dcx: DiagCtxtHandle<'_>,
+    modules: Vec<(String, ThinBuffer)>,
+    serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+    tmp_path: TempDir,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+    //_symbols_below_threshold: &[String],
+) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+    let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
+    info!("going for that thin, thin LTO");
+
+    /*let green_modules: FxHashMap<_, _> =
+    cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();*/
+
+    let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+    let mut thin_buffers = Vec::with_capacity(modules.len());
+    let mut module_names = Vec::with_capacity(full_scope_len);
+    //let mut thin_modules = Vec::with_capacity(full_scope_len);
+
+    for (i, (name, buffer)) in modules.into_iter().enumerate() {
+        info!("local module: {} - {}", i, name);
+        let cname = CString::new(name.as_bytes()).unwrap();
+        /*thin_modules.push(llvm::ThinLTOModule {
+            identifier: cname.as_ptr(),
+            data: buffer.data().as_ptr(),
+            len: buffer.data().len(),
+        });*/
+        thin_buffers.push(buffer);
+        module_names.push(cname);
+    }
+
+    // FIXME: All upstream crates are deserialized internally in the
+    //        function below to extract their summary and modules. Note that
+    //        unlike the loop above we *must* decode and/or read something
+    //        here as these are all just serialized files on disk. An
+    //        improvement, however, to make here would be to store the
+    //        module summary separately from the actual module itself. Right
+    //        now this is stored in one large bitcode file, and the entire
+    //        file is deflate-compressed. We could try to bypass some of the
+    //        decompression by storing the index uncompressed and only
+    //        lazily decompressing the bytecode if necessary.
+    //
+    //        Note that truly taking advantage of this optimization will
+    //        likely be further down the road. We'd have to implement
+    //        incremental ThinLTO first where we could actually avoid
+    //        looking at upstream modules entirely sometimes (the contents,
+    //        we must always unconditionally look at the index).
+    let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
+
+    let cached_modules =
+        cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
+
+    for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
+        info!("upstream or cached module {:?}", name);
+        /*thin_modules.push(llvm::ThinLTOModule {
+            identifier: name.as_ptr(),
+            data: module.data().as_ptr(),
+            len: module.data().len(),
+        });*/
+
+        match module {
+            SerializedModule::Local(_) => {
+                //let path = module_buffer.0.to_str().expect("path");
+                //let my_path = PathBuf::from(path);
+                //let exists = my_path.exists();
+                /*module.module_llvm.should_combine_object_files = true;
+                module
+                .module_llvm
+                .context
+                .add_driver_option(module_buffer.0.to_str().expect("path"));*/
+            }
+            SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
+            SerializedModule::FromUncompressedFile(_) => {
+                unimplemented!("from uncompressed file")
+            }
+        }
+
+        serialized.push(module);
+        module_names.push(name);
+    }
+
+    // Sanity check
+    //assert_eq!(thin_modules.len(), module_names.len());
+
+    // Delegate to the C++ bindings to create some data here. Once this is a
+    // tried-and-true interface we may wish to try to upstream some of this
+    // to LLVM itself; right now we reimplement a lot of what they do
+    // upstream...
+    /*let data = llvm::LLVMRustCreateThinLTOData(
+        thin_modules.as_ptr(),
+        thin_modules.len() as u32,
+        symbols_below_threshold.as_ptr(),
+        symbols_below_threshold.len() as u32,
+    )
+    .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
+    */
+
+    let data = ThinData; //(Arc::new(tmp_path))/*(data)*/;
+
+    info!("thin LTO data created");
+
+    /*let (key_map_path, prev_key_map, curr_key_map) =
+        if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
+            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+            // If the previous file was deleted, or we get an IO error
+            // reading the file, then we'll just use `None` as the
+            // prev_key_map, which will force the code to be recompiled.
+            let prev =
+                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+            (Some(path), prev, curr)
+        }
+        else {
+            // If we don't compile incrementally, we don't need to load the
+            // import data from LLVM.
+            assert!(green_modules.is_empty());
+            let curr = ThinLTOKeysMap::default();
+            (None, None, curr)
+        };
+    info!("thin LTO cache key map loaded");
+    info!("prev_key_map: {:#?}", prev_key_map);
+    info!("curr_key_map: {:#?}", curr_key_map);*/
+
+    // Throw our data in an `Arc` as we'll be sharing it across threads. We
+    // also put all memory referenced by the C++ data (buffers, ids, etc)
+    // into the arc as well. After this we'll create a thin module
+    // codegen per module in this data.
+    let shared =
+        Arc::new(ThinShared { data, thin_buffers, serialized_modules: serialized, module_names });
+
+    let copy_jobs = vec![];
+    let mut opt_jobs = vec![];
+
+    info!("checking which modules can be-reused and which have to be re-optimized.");
+    for (module_index, module_name) in shared.module_names.iter().enumerate() {
+        let module_name = module_name_to_str(module_name);
+        /*if let (Some(prev_key_map), true) =
+            (prev_key_map.as_ref(), green_modules.contains_key(module_name))
+        {
+            assert!(cgcx.incr_comp_session_dir.is_some());
+
+            // If a module exists in both the current and the previous session,
+            // and has the same LTO cache key in both sessions, then we can re-use it
+            if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
+                let work_product = green_modules[module_name].clone();
+                copy_jobs.push(work_product);
+                info!(" - {}: re-used", module_name);
+                assert!(cgcx.incr_comp_session_dir.is_some());
+                continue;
+            }
+        }*/
+
+        info!(" - {}: re-compiled", module_name);
+        opt_jobs
+            .push(LtoModuleCodegen::Thin(ThinModule { shared: shared.clone(), idx: module_index }));
+    }
+
+    // Save the current ThinLTO import information for the next compilation
+    // session, overwriting the previous serialized data (if any).
+    /*if let Some(path) = key_map_path {
+        if let Err(err) = curr_key_map.save_to_file(&path) {
+            return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }));
+        }
+    }*/
+
+    // NOTE: the temporary directory used by LTO should only be deleted after linking,
+    // not now, hence the `forget` below.
+    //module.module_llvm.temp_dir = Some(tmp_path);
+    // TODO: save the directory somewhere so that it gets deleted eventually instead of being leaked.
+    std::mem::forget(tmp_path);
+
+    Ok((opt_jobs, copy_jobs))
+}
+
+pub unsafe fn optimize_thin_module(
+    thin_module: ThinModule<GccCodegenBackend>,
+    _cgcx: &CodegenContext<GccCodegenBackend>,
+) -> Result<ModuleCodegen<GccContext>, FatalError> {
+    //let dcx = cgcx.create_dcx();
+
+    //let module_name = &thin_module.shared.module_names[thin_module.idx];
+    /*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
+    let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/
+
+    // Right now the implementation we've got only works over serialized
+    // modules, so we create a fresh new context and parse the module
+    // into that context. One day, however, we may do this for upstream
+    // crates, but for locally codegened modules we may be able to reuse
+    // the existing context and module.
+    //let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+    //let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &dcx)? as *const _;
+    let mut should_combine_object_files = false;
+    let context = match thin_module.shared.thin_buffers.get(thin_module.idx) {
+        Some(thin_buffer) => Arc::clone(&thin_buffer.context),
+        None => {
+            let context = Context::default();
+            let len = thin_module.shared.thin_buffers.len();
+            let module = &thin_module.shared.serialized_modules[thin_module.idx - len];
+            match *module {
+                SerializedModule::Local(ref module_buffer) => {
+                    let path = module_buffer.0.to_str().expect("path");
+                    context.add_driver_option(path);
+                    should_combine_object_files = true;
+                    /*module.module_llvm.should_combine_object_files = true;
+                    module
+                        .module_llvm
+                        .context
+                        .add_driver_option(module_buffer.0.to_str().expect("path"));*/
+                }
+                SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
+                SerializedModule::FromUncompressedFile(_) => {
+                    unimplemented!("from uncompressed file")
+                }
+            }
+            Arc::new(SyncContext::new(context))
+        }
+    };
+    let module = ModuleCodegen::new_regular(
+        thin_module.name().to_string(),
+        GccContext {
+            context,
+            should_combine_object_files,
+            // TODO(antoyo): use the correct relocation model here.
+            relocation_model: RelocModel::Pic,
+            temp_dir: None,
+        },
+    );
+    /*{
+        let target = &*module.module_llvm.tm;
+        let llmod = module.module_llvm.llmod();
+        save_temp_bitcode(cgcx, &module, "thin-lto-input");
+
+        // Up next comes the per-module local analyses that we do for Thin LTO.
+        // Each of these functions is basically copied from the LLVM
+        // implementation and then tailored to suit this implementation. Ideally
+        // each of these would be supported by upstream LLVM but that's perhaps
+        // a patch for another day!
+        //
+        // You can find some more comments about these functions in the LLVM
+        // bindings we've got (currently `PassWrapper.cpp`)
+        {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
+            unsafe { llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) };
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
+        }
+
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
+        }
+
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
+        }
+
+        {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
+        }
+
+        // Alright now that we've done everything related to the ThinLTO
+        // analysis it's time to run some optimizations! Here we use the same
+        // `run_pass_manager` as the "fat" LTO above except that we tell it to
+        // populate a thin-specific pass manager, which presumably LLVM treats a
+        // little differently.
+        {
+            info!("running thin lto passes over {}", module.name);
+            run_pass_manager(cgcx, &dcx, &mut module, true)?;
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
+        }
+    }*/
+    Ok(module)
+}
+
+pub struct ThinBuffer {
+    context: Arc<SyncContext>,
+}
+
+impl ThinBuffer {
+    pub(crate) fn new(context: &Arc<SyncContext>) -> Self {
+        Self { context: Arc::clone(context) }
+    }
+}
+
+impl ThinBufferMethods for ThinBuffer {
+    fn data(&self) -> &[u8] {
+        &[]
+    }
+
+    fn thin_link_data(&self) -> &[u8] {
+        unimplemented!();
+    }
+}
+
+pub struct ThinData; //(Arc<TempDir>);
+
+fn module_name_to_str(c_str: &CStr) -> &str {
+    c_str.to_str().unwrap_or_else(|e| {
+        bug!("Encountered non-utf8 GCC module name `{}`: {}", c_str.to_string_lossy(), e)
+    })
+}
diff --git a/compiler/rustc_codegen_gcc/src/back/mod.rs b/compiler/rustc_codegen_gcc/src/back/mod.rs
new file mode 100644
index 00000000000..10187eab0d7
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/mod.rs
@@ -0,0 +1,2 @@
+pub mod lto;
+pub mod write;
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
new file mode 100644
index 00000000000..16c895322e8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -0,0 +1,282 @@
+use std::{env, fs};
+
+use gccjit::{Context, OutputKind};
+use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_errors::DiagCtxtHandle;
+use rustc_fs_util::link_or_copy;
+use rustc_session::config::OutputType;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::SplitDebuginfo;
+
+use crate::base::add_pic_option;
+use crate::errors::CopyBitcode;
+use crate::{GccCodegenBackend, GccContext};
+
+pub(crate) unsafe fn codegen(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    dcx: DiagCtxtHandle<'_>,
+    module: ModuleCodegen<GccContext>,
+    config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
+    {
+        let context = &module.module_llvm.context;
+
+        let should_combine_object_files = module.module_llvm.should_combine_object_files;
+
+        // NOTE: For now, only generate object files with GIMPLE when this environment variable is
+        // set, because this requires a particular setup (same gcc/lto1/lto-wrapper commit as libgccjit).
+        // TODO(antoyo): remove this environment variable.
+        let fat_lto = env::var("EMBED_LTO_BITCODE").as_deref() == Ok("1");
+
+        let bc_out = cgcx.output_filenames.temp_path_for_cgu(
+            OutputType::Bitcode,
+            &module.name,
+            cgcx.invocation_temp.as_deref(),
+        );
+        let obj_out = cgcx.output_filenames.temp_path_for_cgu(
+            OutputType::Object,
+            &module.name,
+            cgcx.invocation_temp.as_deref(),
+        );
+
+        if config.bitcode_needed() {
+            if fat_lto {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name);
+
+                // TODO(antoyo)
+                /*if let Some(bitcode_filename) = bc_out.file_name() {
+                    cgcx.prof.artifact_size(
+                        "llvm_bitcode",
+                        bitcode_filename.to_string_lossy(),
+                        data.len() as u64,
+                    );
+                }*/
+
+                if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+                    let _timer = cgcx.prof.generic_activity_with_arg(
+                        "GCC_module_codegen_emit_bitcode",
+                        &*module.name,
+                    );
+                    context.add_command_line_option("-flto=auto");
+                    context.add_command_line_option("-flto-partition=one");
+                    // TODO(antoyo): remove this since we don't want fat objects when it is for bitcode only.
+                    context.add_command_line_option("-ffat-lto-objects");
+                    context.compile_to_file(
+                        OutputKind::ObjectFile,
+                        bc_out.to_str().expect("path to str"),
+                    );
+                }
+
+                if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+                    let _timer = cgcx.prof.generic_activity_with_arg(
+                        "GCC_module_codegen_embed_bitcode",
+                        &*module.name,
+                    );
+                    // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes?
+                    //embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+
+                    context.add_command_line_option("-flto=auto");
+                    context.add_command_line_option("-flto-partition=one");
+                    context.add_command_line_option("-ffat-lto-objects");
+                    // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
+                    context.compile_to_file(
+                        OutputKind::ObjectFile,
+                        bc_out.to_str().expect("path to str"),
+                    );
+                }
+            } else {
+                if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+                    let _timer = cgcx.prof.generic_activity_with_arg(
+                        "GCC_module_codegen_emit_bitcode",
+                        &*module.name,
+                    );
+                    context.compile_to_file(
+                        OutputKind::ObjectFile,
+                        bc_out.to_str().expect("path to str"),
+                    );
+                }
+
+                if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+                    // TODO(antoyo): we might want to emit an error here, saying to set the
+                    // environment variable EMBED_LTO_BITCODE.
+                    let _timer = cgcx.prof.generic_activity_with_arg(
+                        "GCC_module_codegen_embed_bitcode",
+                        &*module.name,
+                    );
+                    // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes?
+                    //embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+
+                    // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
+                    context.compile_to_file(
+                        OutputKind::ObjectFile,
+                        bc_out.to_str().expect("path to str"),
+                    );
+                }
+            }
+        }
+
+        if config.emit_ir {
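+            // NOTE: GCC has no textual IR equivalent to LLVM IR, so just create an empty
+            // file so that the expected output exists.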
+            let out = cgcx.output_filenames.temp_path_for_cgu(
+                OutputType::LlvmAssembly,
+                &module.name,
+                cgcx.invocation_temp.as_deref(),
+            );
+            std::fs::write(out, "").expect("write file");
+        }
+
+        if config.emit_asm {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
+            let path = cgcx.output_filenames.temp_path_for_cgu(
+                OutputType::Assembly,
+                &module.name,
+                cgcx.invocation_temp.as_deref(),
+            );
+            context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
+        }
+
+        match config.emit_obj {
+            EmitObj::ObjectCode(_) => {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("GCC_module_codegen_emit_obj", &*module.name);
+                if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
+                    println!("Module {}", module.name);
+                }
+                if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1")
+                    || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name)
+                {
+                    println!("Dumping reproducer {}", module.name);
+                    let _ = fs::create_dir("/tmp/reproducers");
+                    // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
+                    // transmuting an rvalue to an lvalue.
+                    // The segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue.
+                    context.dump_reproducer_to_file(format!("/tmp/reproducers/{}.c", module.name));
+                    println!("Dumped reproducer {}", module.name);
+                }
+                if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
+                    let _ = fs::create_dir("/tmp/gccjit_dumps");
+                    let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
+                    context.set_debug_info(true);
+                    context.dump_to_file(path, true);
+                }
+                if should_combine_object_files {
+                    if fat_lto {
+                        context.add_command_line_option("-flto=auto");
+                        context.add_command_line_option("-flto-partition=one");
+
+                        // NOTE: without -fuse-linker-plugin, we get the following error:
+                        // lto1: internal compiler error: decompressed stream: Destination buffer is too small
+                        // TODO(antoyo): since we no longer do LTO when the linker is invoked,
+                        // perhaps the following flag is not necessary anymore.
+                        context.add_driver_option("-fuse-linker-plugin");
+                    }
+
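+                    // NOTE: -Wl,-r asks the linker for a relocatable (partial) link, which
+                    // combines the input .o files into a single object file.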
+                    context.add_driver_option("-Wl,-r");
+                    // NOTE: we need -nostdlib, otherwise, we get the following error:
+                    // /usr/bin/ld: cannot find -lgcc_s: No such file or directory
+                    context.add_driver_option("-nostdlib");
+
+                    let path = obj_out.to_str().expect("path to str");
+
+                    if fat_lto {
+                        let lto_path = format!("{}.lto", path);
+                        // FIXME(antoyo): The LTO frontend generates the following warning:
+                        // ../build_sysroot/sysroot_src/library/core/src/num/dec2flt/lemire.rs:150:15: warning: type of ‘_ZN4core3num7dec2flt5table17POWER_OF_FIVE_12817ha449a68fb31379e4E’ does not match original declaration [-Wlto-type-mismatch]
+                        // 150 |     let (lo5, hi5) = POWER_OF_FIVE_128[index];
+                        //     |               ^
+                        // lto1: note: ‘_ZN4core3num7dec2flt5table17POWER_OF_FIVE_12817ha449a68fb31379e4E’ was previously declared here
+                        //
+                        // This option is to mute it to make the UI tests pass with LTO enabled.
+                        context.add_driver_option("-Wno-lto-type-mismatch");
+                        // NOTE: this doesn't actually generate an executable. With the above
+                        // flags, it combines the .o files together into another .o.
+                        context.compile_to_file(OutputKind::Executable, &lto_path);
+
+                        let context = Context::default();
+                        if cgcx.target_arch == "x86" || cgcx.target_arch == "x86_64" {
+                            // NOTE: it seems we need to use add_driver_option instead of
+                            // add_command_line_option here because we use the LTO frontend via gcc.
+                            context.add_driver_option("-masm=intel");
+                        }
+
+                        // NOTE: these two options are needed to invoke LTO to produce an object file.
+                        // We need to initiate a second compilation because the arguments "-x lto"
+                        // need to be at the very beginning.
+                        context.add_driver_option("-x");
+                        context.add_driver_option("lto");
+                        add_pic_option(&context, module.module_llvm.relocation_model);
+                        context.add_driver_option(lto_path);
+
+                        context.compile_to_file(OutputKind::ObjectFile, path);
+                    } else {
+                        // NOTE: this doesn't actually generate an executable. With the above
+                        // flags, it combines the .o files together into another .o.
+                        context.compile_to_file(OutputKind::Executable, path);
+                    }
+                } else {
+                    context.compile_to_file(
+                        OutputKind::ObjectFile,
+                        obj_out.to_str().expect("path to str"),
+                    );
+                }
+            }
+
+            EmitObj::Bitcode => {
+                debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+                if let Err(err) = link_or_copy(&bc_out, &obj_out) {
+                    dcx.emit_err(CopyBitcode { err });
+                }
+
+                if !config.emit_bc {
+                    debug!("removing_bitcode {:?}", bc_out);
+                    ensure_removed(dcx, &bc_out);
+                }
+            }
+
+            EmitObj::None => {}
+        }
+    }
+
+    Ok(module.into_compiled_module(
+        config.emit_obj != EmitObj::None,
+        cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
+        config.emit_bc,
+        config.emit_asm,
+        config.emit_ir,
+        &cgcx.output_filenames,
+        cgcx.invocation_temp.as_deref(),
+    ))
+}
+
+pub(crate) fn link(
+    _cgcx: &CodegenContext<GccCodegenBackend>,
+    _dcx: DiagCtxtHandle<'_>,
+    mut _modules: Vec<ModuleCodegen<GccContext>>,
+) -> Result<ModuleCodegen<GccContext>, FatalError> {
+    unimplemented!();
+}
+
+pub(crate) fn save_temp_bitcode(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    _module: &ModuleCodegen<GccContext>,
+    _name: &str,
+) {
+    if !cgcx.save_temps {
+        return;
+    }
+    unimplemented!();
+    /*unsafe {
+        let ext = format!("{}.bc", name);
+        let cgu = Some(&module.name[..]);
+        let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+        let cstr = path_to_c_string(&path);
+        let llmod = module.module_llvm.llmod();
+        llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+    }*/
+}
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
new file mode 100644
index 00000000000..9b495174a3f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -0,0 +1,273 @@
+use std::collections::HashSet;
+use std::env;
+use std::sync::Arc;
+use std::time::Instant;
+
+use gccjit::{CType, Context, FunctionType, GlobalKind};
+use rustc_codegen_ssa::ModuleCodegen;
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::DebugInfoCodegenMethods;
+use rustc_middle::dep_graph;
+use rustc_middle::mir::mono::Linkage;
+#[cfg(feature = "master")]
+use rustc_middle::mir::mono::Visibility;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::DebugInfo;
+use rustc_span::Symbol;
+#[cfg(feature = "master")]
+use rustc_target::spec::SymbolVisibility;
+use rustc_target::spec::{PanicStrategy, RelocModel};
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::{GccContext, LockedTargetInfo, SyncContext, gcc_util, new_context};
+
+#[cfg(feature = "master")]
+pub fn visibility_to_gcc(visibility: Visibility) -> gccjit::Visibility {
+    match visibility {
+        Visibility::Default => gccjit::Visibility::Default,
+        Visibility::Hidden => gccjit::Visibility::Hidden,
+        Visibility::Protected => gccjit::Visibility::Protected,
+    }
+}
+
+#[cfg(feature = "master")]
+pub fn symbol_visibility_to_gcc(visibility: SymbolVisibility) -> gccjit::Visibility {
+    match visibility {
+        SymbolVisibility::Hidden => gccjit::Visibility::Hidden,
+        SymbolVisibility::Protected => gccjit::Visibility::Protected,
+        SymbolVisibility::Interposable => gccjit::Visibility::Default,
+    }
+}
+
+pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
+    match linkage {
+        Linkage::External => GlobalKind::Imported,
+        Linkage::AvailableExternally => GlobalKind::Imported,
+        Linkage::LinkOnceAny => unimplemented!(),
+        Linkage::LinkOnceODR => unimplemented!(),
+        Linkage::WeakAny => unimplemented!(),
+        Linkage::WeakODR => unimplemented!(),
+        Linkage::Internal => GlobalKind::Internal,
+        Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
+        Linkage::Common => unimplemented!(),
+    }
+}
+
+pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
+    match linkage {
+        Linkage::External => FunctionType::Exported,
+        // TODO(antoyo): set the attribute externally_visible.
+        Linkage::AvailableExternally => FunctionType::Extern,
+        Linkage::LinkOnceAny => unimplemented!(),
+        Linkage::LinkOnceODR => unimplemented!(),
+        Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
+        Linkage::WeakODR => unimplemented!(),
+        Linkage::Internal => FunctionType::Internal,
+        Linkage::ExternalWeak => unimplemented!(),
+        Linkage::Common => unimplemented!(),
+    }
+}
+
+pub fn compile_codegen_unit(
+    tcx: TyCtxt<'_>,
+    cgu_name: Symbol,
+    target_info: LockedTargetInfo,
+) -> (ModuleCodegen<GccContext>, u64) {
+    let prof_timer = tcx.prof.generic_activity("codegen_module");
+    let start_time = Instant::now();
+
+    let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+    let (module, _) = tcx.dep_graph.with_task(
+        dep_node,
+        tcx,
+        (cgu_name, target_info),
+        module_codegen,
+        Some(dep_graph::hash_result),
+    );
+    let time_to_codegen = start_time.elapsed();
+    drop(prof_timer);
+
+    // We assume that the cost to run GCC on a CGU is proportional to
+    // the time we needed for codegenning it (here measured in nanoseconds).
+    let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
+
+    fn module_codegen(
+        tcx: TyCtxt<'_>,
+        (cgu_name, target_info): (Symbol, LockedTargetInfo),
+    ) -> ModuleCodegen<GccContext> {
+        let cgu = tcx.codegen_unit(cgu_name);
+        // Instantiate monomorphizations without filling out definitions yet...
+        let context = new_context(tcx);
+
+        if tcx.sess.panic_strategy() == PanicStrategy::Unwind {
+            context.add_command_line_option("-fexceptions");
+            context.add_driver_option("-fexceptions");
+        }
+
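+        // Collect the target features explicitly disabled on the command line; for example,
+        // `-C target-feature=+sse4.2,-avx` yields `disabled_features == {"avx"}`.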
+        let disabled_features: HashSet<_> = tcx
+            .sess
+            .opts
+            .cg
+            .target_feature
+            .split(',')
+            .filter(|feature| feature.starts_with('-'))
+            .map(|string| &string[1..])
+            .collect();
+
+        if !disabled_features.contains("avx") && tcx.sess.target.arch == "x86_64" {
+            // NOTE: we always enable AVX because the equivalent of llvm.x86.sse2.cmp.pd in GCC for
+            // SSE2 is multiple builtins, so we use the AVX __builtin_ia32_cmppd instead.
+            // FIXME(antoyo): use the proper builtins for llvm.x86.sse2.cmp.pd and similar.
+            context.add_command_line_option("-mavx");
+        }
+
+        for arg in &tcx.sess.opts.cg.llvm_args {
+            context.add_command_line_option(arg);
+        }
+        // NOTE: This is needed to compile the file src/intrinsic/archs.rs during a bootstrap of rustc.
+        context.add_command_line_option("-fno-var-tracking-assignments");
+        // NOTE: an optimization (https://github.com/rust-lang/rustc_codegen_gcc/issues/53).
+        context.add_command_line_option("-fno-semantic-interposition");
+        // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292).
+        context.add_command_line_option("-fno-strict-aliasing");
+        // NOTE: Rust relies on LLVM doing wrapping on overflow.
+        context.add_command_line_option("-fwrapv");
+
+        if let Some(model) = tcx.sess.code_model() {
+            use rustc_target::spec::CodeModel;
+
+            context.add_command_line_option(match model {
+                CodeModel::Tiny => "-mcmodel=tiny",
+                CodeModel::Small => "-mcmodel=small",
+                CodeModel::Kernel => "-mcmodel=kernel",
+                CodeModel::Medium => "-mcmodel=medium",
+                CodeModel::Large => "-mcmodel=large",
+            });
+        }
+
+        add_pic_option(&context, tcx.sess.relocation_model());
+
+        let target_cpu = gcc_util::target_cpu(tcx.sess);
+        if target_cpu != "generic" {
+            context.add_command_line_option(format!("-march={}", target_cpu));
+        }
+
+        if tcx
+            .sess
+            .opts
+            .unstable_opts
+            .function_sections
+            .unwrap_or(tcx.sess.target.function_sections)
+        {
+            context.add_command_line_option("-ffunction-sections");
+            context.add_command_line_option("-fdata-sections");
+        }
+
+        if env::var("CG_GCCJIT_DUMP_RTL").as_deref() == Ok("1") {
+            context.add_command_line_option("-fdump-rtl-vregs");
+        }
+        if env::var("CG_GCCJIT_DUMP_RTL_ALL").as_deref() == Ok("1") {
+            context.add_command_line_option("-fdump-rtl-all");
+        }
+        if env::var("CG_GCCJIT_DUMP_TREE_ALL").as_deref() == Ok("1") {
+            context.add_command_line_option("-fdump-tree-all-eh");
+        }
+        if env::var("CG_GCCJIT_DUMP_IPA_ALL").as_deref() == Ok("1") {
+            context.add_command_line_option("-fdump-ipa-all-eh");
+        }
+        if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+            context.set_dump_code_on_compile(true);
+        }
+        if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+            context.set_dump_initial_gimple(true);
+        }
+        if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+            context.set_dump_everything(true);
+        }
+        if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+            context.set_keep_intermediates(true);
+        }
+        if env::var("CG_GCCJIT_VERBOSE").as_deref() == Ok("1") {
+            context.add_driver_option("-v");
+        }
+
+        // NOTE: The codegen generates unreachable blocks.
+        context.set_allow_unreachable_blocks(true);
+
+        {
+            // TODO: to make this less error-prone, forbid compiling with a context on which
+            // get_target_info() was called (calling get_target_info() adds the flag
+            // -fsyntax-only to the context).
+            let f16_type_supported = target_info.supports_target_dependent_type(CType::Float16);
+            let f32_type_supported = target_info.supports_target_dependent_type(CType::Float32);
+            let f64_type_supported = target_info.supports_target_dependent_type(CType::Float64);
+            let f128_type_supported = target_info.supports_target_dependent_type(CType::Float128);
+            let u128_type_supported = target_info.supports_target_dependent_type(CType::UInt128t);
+            // TODO: improve this to avoid passing that many arguments.
+            let cx = CodegenCx::new(
+                &context,
+                cgu,
+                tcx,
+                u128_type_supported,
+                f16_type_supported,
+                f32_type_supported,
+                f64_type_supported,
+                f128_type_supported,
+            );
+
+            let mono_items = cgu.items_in_deterministic_order(tcx);
+            for &(mono_item, data) in &mono_items {
+                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, data.linkage, data.visibility);
+            }
+
+            // ... and now that we have everything pre-defined, fill out those definitions.
+            for &(mono_item, _) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            }
+
+            // If this codegen unit contains the main function, also create the
+            // wrapper here
+            maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+            // Finalize debuginfo
+            if cx.sess().opts.debuginfo != DebugInfo::None {
+                cx.debuginfo_finalize();
+            }
+        }
+
+        ModuleCodegen::new_regular(
+            cgu_name.to_string(),
+            GccContext {
+                context: Arc::new(SyncContext::new(context)),
+                relocation_model: tcx.sess.relocation_model(),
+                should_combine_object_files: false,
+                temp_dir: None,
+            },
+        )
+    }
+
+    (module, cost)
+}
+
+pub fn add_pic_option<'gcc>(context: &Context<'gcc>, relocation_model: RelocModel) {
+    match relocation_model {
+        rustc_target::spec::RelocModel::Static => {
+            context.add_command_line_option("-fno-pie");
+            context.add_driver_option("-fno-pie");
+        }
+        rustc_target::spec::RelocModel::Pic => {
+            context.add_command_line_option("-fPIC");
+            // NOTE: we use both add_command_line_option and add_driver_option because the usage in
+            // this module (compile_codegen_unit) requires add_command_line_option while the usage
+            // in the back::write module (codegen) requires add_driver_option.
+            context.add_driver_option("-fPIC");
+        }
+        rustc_target::spec::RelocModel::Pie => {
+            context.add_command_line_option("-fPIE");
+            context.add_driver_option("-fPIE");
+        }
+        model => eprintln!("Unsupported relocation model: {:?}", model),
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
new file mode 100644
index 00000000000..5c70f4a7df9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -0,0 +1,2478 @@
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::Deref;
+
+use gccjit::{
+    BinaryOp, Block, ComparisonOp, Context, Function, LValue, Location, RValue, ToRValue, Type,
+    UnaryOp,
+};
+use rustc_abi as abi;
+use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout, WrappingRange};
+use rustc_apfloat::{Float, Round, Status, ieee};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{
+    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+    BackendTypes, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods,
+    LayoutTypeCodegenMethods, OverflowOp, StaticBuilderMethods,
+};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::bug;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, LayoutOfHelpers,
+};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::callconv::FnAbi;
+use rustc_target::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, Target, WasmCAbi, X86Abi};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+use crate::type_of::LayoutGccExt;
+
+// TODO(antoyo)
+type Funclet = ();
+
+enum ExtremumOperation {
+    Max,
+    Min,
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+    pub cx: &'a CodegenCx<'gcc, 'tcx>,
+    pub block: Block<'gcc>,
+    pub location: Option<Location<'gcc>>,
+    value_counter: Cell<u64>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+        Builder { cx, block, location: None, value_counter: Cell::new(0) }
+    }
+
+    fn next_value_counter(&self) -> u64 {
+        self.value_counter.set(self.value_counter.get() + 1);
+        self.value_counter.get()
+    }
+
+    fn atomic_extremum(
+        &mut self,
+        operation: ExtremumOperation,
+        dst: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+    ) -> RValue<'gcc> {
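+        // This emits a compare-exchange loop. Roughly, in pseudo-code (shown for Max; Min
+        // flips the comparison):
+        //
+        //     previous = atomic_load(dst);
+        //     while (previous < src && !compare_exchange(dst, &previous, src)) {}
+        //
+        // On failure, compare_exchange updates `previous` with the current value of `dst`.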
+        let size = get_maybe_pointer_size(src);
+
+        let func = self.current_func();
+
+        let load_ordering = match order {
+            // TODO(antoyo): does this make sense?
+            AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+            _ => order,
+        };
+        let previous_value =
+            self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
+        let previous_var =
+            func.new_local(self.location, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(self.location, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(self.location, previous_var, previous_value);
+        self.llbb().add_assignment(self.location, return_value, previous_var.to_rvalue());
+
+        let while_block = func.new_block("while");
+        let after_block = func.new_block("after_while");
+        self.llbb().end_with_jump(self.location, while_block);
+
+        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current
+        // block in the state needs to be updated.
+        self.switch_to_block(while_block);
+
+        let comparison_operator = match operation {
+            ExtremumOperation::Max => ComparisonOp::LessThan,
+            ExtremumOperation::Min => ComparisonOp::GreaterThan,
+        };
+
+        let cond1 = self.context.new_comparison(
+            self.location,
+            comparison_operator,
+            previous_var.to_rvalue(),
+            self.context.new_cast(self.location, src, previous_value.get_type()),
+        );
+        let compare_exchange =
+            self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(
+            self.location,
+            UnaryOp::LogicalNegate,
+            compare_exchange.get_type(),
+            compare_exchange,
+        );
+        let cond = self.cx.context.new_binary_op(
+            self.location,
+            BinaryOp::LogicalAnd,
+            self.cx.bool_type,
+            cond1,
+            cond2,
+        );
+
+        while_block.end_with_conditional(self.location, cond, while_block, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current block in
+        // the state needs to be updated.
+        self.switch_to_block(after_block);
+
+        return_value.to_rvalue()
+    }
+
+    fn compare_exchange(
+        &self,
+        dst: RValue<'gcc>,
+        cmp: LValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> RValue<'gcc> {
+        let size = get_maybe_pointer_size(src);
+        let compare_exchange =
+            self.context.get_builtin_function(format!("__atomic_compare_exchange_{}", size));
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
+        let expected =
+            self.context.new_cast(self.location, cmp.get_address(self.location), void_ptr_type);
+
+        // NOTE: not sure why, but we have the wrong type here.
+        let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
+        let src = self.context.new_bitcast(self.location, src, int_type);
+        self.context.new_call(
+            self.location,
+            compare_exchange,
+            &[dst, expected, src, weak, order, failure_order],
+        )
+    }
+
+    pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
+        self.llbb().add_assignment(self.location, lvalue, value);
+    }
+
+    fn check_call<'b>(
+        &mut self,
+        _typ: &str,
+        func: Function<'gcc>,
+        args: &'b [RValue<'gcc>],
+    ) -> Cow<'b, [RValue<'gcc>]> {
+        let mut all_args_match = true;
+        let mut param_types = vec![];
+        let param_count = func.get_param_count();
+        for (index, arg) in args.iter().enumerate().take(param_count) {
+            let param = func.get_param(index as i32);
+            let param = param.to_rvalue().get_type();
+            if param != arg.get_type() {
+                all_args_match = false;
+            }
+            param_types.push(param);
+        }
+
+        if all_args_match {
+            return Cow::Borrowed(args);
+        }
+
+        let casted_args: Vec<_> = param_types
+            .into_iter()
+            .zip(args.iter())
+            .map(|(expected_ty, &actual_val)| {
+                let actual_ty = actual_val.get_type();
+                if expected_ty != actual_ty {
+                    self.bitcast(actual_val, expected_ty)
+                } else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        debug_assert_eq!(casted_args.len(), args.len());
+
+        Cow::Owned(casted_args)
+    }
+
+    fn check_ptr_call<'b>(
+        &mut self,
+        _typ: &str,
+        func_ptr: RValue<'gcc>,
+        args: &'b [RValue<'gcc>],
+    ) -> Cow<'b, [RValue<'gcc>]> {
+        let mut all_args_match = true;
+        let mut param_types = vec![];
+        let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+        for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
+            let param = gcc_func.get_param_type(index);
+            if param != arg.get_type() {
+                all_args_match = false;
+            }
+            param_types.push(param);
+        }
+
+        let mut on_stack_param_indices = FxHashSet::default();
+        if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
+            on_stack_param_indices.clone_from(indices);
+        }
+
+        if all_args_match {
+            return Cow::Borrowed(args);
+        }
+
+        let func_name = format!("{:?}", func_ptr);
+
+        let mut casted_args: Vec<_> = param_types
+            .into_iter()
+            .zip(args.iter())
+            .enumerate()
+            .map(|(index, (expected_ty, &actual_val))| {
+                if llvm::ignore_arg_cast(&func_name, index, args.len()) {
+                    return actual_val;
+                }
+
+                let actual_ty = actual_val.get_type();
+                if expected_ty != actual_ty {
+                    if !actual_ty.is_vector()
+                        && !expected_ty.is_vector()
+                        && (actual_ty.is_integral() && expected_ty.is_integral())
+                        || (actual_ty.get_pointee().is_some()
+                            && expected_ty.get_pointee().is_some())
+                    {
+                        self.context.new_cast(self.location, actual_val, expected_ty)
+                    } else if on_stack_param_indices.contains(&index) {
+                        let ty = actual_val.get_type();
+                        // It's possible that the value behind the pointer is actually not exactly
+                        // the expected type, so to go around that, we add a cast before
+                        // dereferencing the value.
+                        if let Some(pointee_val) = ty.get_pointee()
+                            && pointee_val != expected_ty
+                        {
+                            let new_val = self.context.new_cast(
+                                self.location,
+                                actual_val,
+                                expected_ty.make_pointer(),
+                            );
+                            new_val.dereference(self.location).to_rvalue()
+                        } else {
+                            actual_val.dereference(self.location).to_rvalue()
+                        }
+                    } else {
+                        // FIXME: this condition seems wrong: it will pass when both types are not
+                        // a vector.
+                        assert!(
+                            (!expected_ty.is_vector() || actual_ty.is_vector())
+                                && (expected_ty.is_vector() || !actual_ty.is_vector()),
+                            "{:?} (is vector: {}) -> {:?} (is vector: {}), Function: {:?}[{}]",
+                            actual_ty,
+                            actual_ty.is_vector(),
+                            expected_ty,
+                            expected_ty.is_vector(),
+                            func_ptr,
+                            index
+                        );
+                        // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+                        // TODO: remove bitcast now that vector types can be compared?
+                        // ==> We use bitcast to avoid having to do many manual casts from e.g. __m256i to __v32qi (in
+                        // the case of _mm256_aesenc_epi128).
+                        self.bitcast(actual_val, expected_ty)
+                    }
+                } else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        // NOTE: pass any remaining arguments through unchanged, to account for variadic functions.
+        for arg in args.iter().skip(casted_args.len()) {
+            casted_args.push(*arg);
+        }
+
+        Cow::Owned(casted_args)
+    }
+
+    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+        let stored_ty = self.cx.val_ty(val);
+        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+        self.bitcast(ptr, stored_ptr_ty)
+    }
+
+    pub fn current_func(&self) -> Function<'gcc> {
+        self.block.get_function()
+    }
+
+    fn function_call(
+        &mut self,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
+        // TODO(antoyo): remove when the API supports a different type for functions.
+        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
+        let args = self.check_call("call", func, args);
+
+        // gccjit requires the result of a function call to be used, even when it is not needed.
+        // That's why we assign the result to a local or call add_eval().
+        let return_type = func.get_return_type();
+        let void_type = self.context.new_type::<()>();
+        let current_func = self.block.get_function();
+        if return_type != void_type {
+            let result = current_func.new_local(
+                self.location,
+                return_type,
+                format!("returnValue{}", self.next_value_counter()),
+            );
+            self.block.add_assignment(
+                self.location,
+                result,
+                self.cx.context.new_call(self.location, func, &args),
+            );
+            result.to_rvalue()
+        } else {
+            self.block
+                .add_eval(self.location, self.cx.context.new_call(self.location, func, &args));
+            // Return a dummy value when there is no return value.
+            self.context.new_rvalue_zero(self.isize_type)
+        }
+    }
+
+    fn function_ptr_call(
+        &mut self,
+        typ: Type<'gcc>,
+        mut func_ptr: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
+        let gcc_func = match func_ptr.get_type().dyncast_function_ptr_type() {
+            Some(func) => func,
+            None => {
+                // NOTE: due to opaque pointers now being used, we need to cast here.
+                let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
+                func_ptr = self.context.new_cast(self.location, func_ptr, typ);
+                new_func_type
+            }
+        };
+        let func_name = format!("{:?}", func_ptr);
+        let previous_arg_count = args.len();
+        let orig_args = args;
+        let args = {
+            func_ptr = llvm::adjust_function(self.context, &func_name, func_ptr, args);
+            llvm::adjust_intrinsic_arguments(self, gcc_func, args.into(), &func_name)
+        };
+        let args_adjusted = args.len() != previous_arg_count;
+        let args = self.check_ptr_call("call", func_ptr, &args);
+
+        // gccjit requires the result of a function call to be used, even when it is not needed.
+        // That's why we assign the result to a local or call add_eval().
+        let return_type = gcc_func.get_return_type();
+        let void_type = self.context.new_type::<()>();
+        let current_func = self.block.get_function();
+
+        if return_type != void_type {
+            let return_value = self.cx.context.new_call_through_ptr(self.location, func_ptr, &args);
+            let return_value = llvm::adjust_intrinsic_return_value(
+                self,
+                return_value,
+                &func_name,
+                &args,
+                args_adjusted,
+                orig_args,
+            );
+            let result = current_func.new_local(
+                self.location,
+                return_value.get_type(),
+                format!("ptrReturnValue{}", self.next_value_counter()),
+            );
+            self.block.add_assignment(self.location, result, return_value);
+            result.to_rvalue()
+        } else {
+            #[cfg(not(feature = "master"))]
+            if gcc_func.get_param_count() == 0 {
+                // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
+                self.block.add_eval(
+                    self.location,
+                    self.cx.context.new_call_through_ptr(self.location, func_ptr, &[]),
+                );
+            } else {
+                self.block.add_eval(
+                    self.location,
+                    self.cx.context.new_call_through_ptr(self.location, func_ptr, &args),
+                );
+            }
+            #[cfg(feature = "master")]
+            self.block.add_eval(
+                self.location,
+                self.cx.context.new_call_through_ptr(self.location, func_ptr, &args),
+            );
+            // Return a dummy value when there is no return value.
+            self.context.new_rvalue_zero(self.isize_type)
+        }
+    }
+
+    pub fn overflow_call(
+        &self,
+        func: Function<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
+        // gccjit requires the result of a function call to be used, even when it is not needed.
+        // That's why we assign the result to a local.
+        let return_type = self.context.new_type::<bool>();
+        let current_func = self.block.get_function();
+        // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
+        let result = current_func.new_local(
+            self.location,
+            return_type,
+            format!("overflowReturnValue{}", self.next_value_counter()),
+        );
+        self.block.add_assignment(
+            self.location,
+            result,
+            self.cx.context.new_call(self.location, func, args),
+        );
+        result.to_rvalue()
+    }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.cx.tcx()
+    }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.cx.data_layout()
+    }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        self.cx.handle_layout_err(err, span, ty)
+    }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+    }
+}
+
+impl<'a, 'gcc, 'tcx> Deref for Builder<'a, 'gcc, 'tcx> {
+    type Target = CodegenCx<'gcc, 'tcx>;
+
+    fn deref<'b>(&'b self) -> &'a Self::Target {
+        self.cx
+    }
+}
+
+impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
+    type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
+    type Metadata = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Metadata;
+    type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
+    type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
+    type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
+    type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
+
+    type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
+    type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
+    type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
+}
+
+fn set_rvalue_location<'a, 'gcc, 'tcx>(
+    bx: &mut Builder<'a, 'gcc, 'tcx>,
+    rvalue: RValue<'gcc>,
+) -> RValue<'gcc> {
+    if bx.location.is_some() {
+        #[cfg(feature = "master")]
+        rvalue.set_location(bx.location.unwrap());
+    }
+    rvalue
+}
+
+impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
+    type CodegenCx = CodegenCx<'gcc, 'tcx>;
+
+    fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> {
+        Builder::with_cx(cx, block)
+    }
+
+    fn llbb(&self) -> Block<'gcc> {
+        self.block
+    }
+
+    fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
+        let func = cx.rvalue_as_function(func);
+        func.new_block(name)
+    }
+
+    fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
+        let func = self.current_func();
+        func.new_block(name)
+    }
+
+    fn switch_to_block(&mut self, block: Self::BasicBlock) {
+        self.block = block;
+    }
+
+    fn ret_void(&mut self) {
+        self.llbb().end_with_void_return(self.location)
+    }
+
+    fn ret(&mut self, mut value: RValue<'gcc>) {
+        if self.structs_as_pointer.borrow().contains(&value) {
+            // NOTE: hack to work around a limitation of the rustc API: see comment on
+            // CodegenCx.structs_as_pointer
+            value = value.dereference(self.location).to_rvalue();
+        }
+        let expected_return_type = self.current_func().get_return_type();
+        if !expected_return_type.is_compatible_with(value.get_type()) {
+            // NOTE: due to opaque pointers now being used, we need to cast here.
+            value = self.context.new_cast(self.location, value, expected_return_type);
+        }
+        self.llbb().end_with_return(self.location, value);
+    }
+
+    fn br(&mut self, dest: Block<'gcc>) {
+        self.llbb().end_with_jump(self.location, dest)
+    }
+
+    fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
+        self.llbb().end_with_conditional(self.location, cond, then_block, else_block)
+    }
+
+    fn switch(
+        &mut self,
+        value: RValue<'gcc>,
+        default_block: Block<'gcc>,
+        cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>,
+    ) {
+        let mut gcc_cases = vec![];
+        let typ = self.val_ty(value);
+        for (on_val, dest) in cases {
+            let on_val = self.const_uint_big(typ, on_val);
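+            // NOTE: a libgccjit case covers a range of values, so a single value is expressed
+            // as the degenerate range [on_val, on_val].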
+            gcc_cases.push(self.context.new_case(on_val, on_val, dest));
+        }
+        self.block.end_with_switch(self.location, value, default_block, &gcc_cases);
+    }
+
+    #[cfg(feature = "master")]
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+        instance: Option<Instance<'tcx>>,
+    ) -> RValue<'gcc> {
+        let try_block = self.current_func().new_block("try");
+
+        let current_block = self.block;
+        self.block = try_block;
+        let call = self.call(typ, fn_attrs, None, func, args, None, instance); // TODO(antoyo): use funclet here?
+        self.block = current_block;
+
+        let return_value =
+            self.current_func().new_local(self.location, call.get_type(), "invokeResult");
+
+        try_block.add_assignment(self.location, return_value, call);
+
+        try_block.end_with_jump(self.location, then);
+
+        if self.cleanup_blocks.borrow().contains(&catch) {
+            self.block.add_try_finally(self.location, try_block, catch);
+        } else {
+            self.block.add_try_catch(self.location, try_block, catch);
+        }
+
+        self.block.end_with_jump(self.location, then);
+
+        return_value.to_rvalue()
+    }
+
+    #[cfg(not(feature = "master"))]
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+        instance: Option<Instance<'tcx>>,
+    ) -> RValue<'gcc> {
+        let call_site = self.call(typ, fn_attrs, None, func, args, None, instance);
+        let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
+        self.llbb().end_with_conditional(self.location, condition, then, catch);
+        if let Some(_fn_abi) = fn_abi {
+            // TODO(bjorn3): Apply function attributes
+        }
+        call_site
+    }
+
+    fn unreachable(&mut self) {
+        let func = self.context.get_builtin_function("__builtin_unreachable");
+        self.block.add_eval(self.location, self.context.new_call(self.location, func, &[]));
+        let return_type = self.block.get_function().get_return_type();
+        let void_type = self.context.new_type::<()>();
+        if return_type == void_type {
+            self.block.end_with_void_return(self.location)
+        } else {
+            let return_value =
+                self.current_func().new_local(self.location, return_type, "unreachableReturn");
+            self.block.end_with_return(self.location, return_value)
+        }
+    }
+
+    fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_add(a, b)
+    }
+
+    fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a + b
+    }
+
+    // TODO(antoyo): should we also override the `unchecked_` versions?
+    fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_sub(a, b)
+    }
+
+    fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a - b
+    }
+
+    fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_mul(a, b)
+    }
+
+    fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.context.new_binary_op(self.location, BinaryOp::Mult, a.get_type(), a, b)
+    }
+
+    fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_udiv(a, b)
+    }
+
+    fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): poison if not exact.
+        let a_type = a.get_type().to_unsigned(self);
+        let a = self.gcc_int_cast(a, a_type);
+        let b_type = b.get_type().to_unsigned(self);
+        let b = self.gcc_int_cast(b, b_type);
+        a / b
+    }
+
+    fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_sdiv(a, b)
+    }
+
+    fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): poison if not exact.
+        // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+        // should be the same.
+        let typ = a.get_type().to_signed(self);
+        let b = self.context.new_cast(self.location, b, typ);
+        a / b
+    }
+
+    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a / b
+    }
+
+    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_urem(a, b)
+    }
+
+    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_srem(a, b)
+    }
+
+    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): add check in libgccjit since using the binary operator % causes the following error:
+        // during RTL pass: expand
+        // libgccjit.so: error: in expmed_mode_index, at expmed.h:240
+        // 0x7f0101d58dc6 expmed_mode_index
+        //     ../../../gcc/gcc/expmed.h:240
+        // 0x7f0101d58e35 expmed_op_cost_ptr
+        //     ../../../gcc/gcc/expmed.h:262
+        // 0x7f0101d594a1 sdiv_cost_ptr
+        //     ../../../gcc/gcc/expmed.h:531
+        // 0x7f0101d594f3 sdiv_cost
+        //     ../../../gcc/gcc/expmed.h:549
+        // 0x7f0101d6af7e expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int, optab_methods)
+        //     ../../../gcc/gcc/expmed.cc:4356
+        // 0x7f0101d94f9e expand_expr_divmod
+        //     ../../../gcc/gcc/expr.cc:8929
+        // 0x7f0101d97a26 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
+        //     ../../../gcc/gcc/expr.cc:9566
+        // 0x7f0101bef6ef expand_gimple_stmt_1
+        //     ../../../gcc/gcc/cfgexpand.cc:3967
+        // 0x7f0101bef910 expand_gimple_stmt
+        //     ../../../gcc/gcc/cfgexpand.cc:4028
+        // 0x7f0101bf6ee7 expand_gimple_basic_block
+        //     ../../../gcc/gcc/cfgexpand.cc:6069
+        // 0x7f0101bf9194 execute
+        //     ../../../gcc/gcc/cfgexpand.cc:6795
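+        // So instead of `%`, lower frem to a call to fmodf/fmod/fmodl depending on the float
+        // width, and apply it lane by lane for vector types.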
+        let a_type = a.get_type();
+        let a_type_unqualified = a_type.unqualified();
+        if a_type.is_compatible_with(self.cx.float_type) {
+            let fmodf = self.context.get_builtin_function("fmodf");
+            // FIXME(antoyo): this seems to produce the wrong result.
+            return self.context.new_call(self.location, fmodf, &[a, b]);
+        }
+
+        #[cfg(feature = "master")]
+        match self.cx.type_kind(a_type) {
+            TypeKind::Half | TypeKind::Float => {
+                let fmodf = self.context.get_builtin_function("fmodf");
+                return self.context.new_call(self.location, fmodf, &[a, b]);
+            }
+            TypeKind::Double => {
+                let fmod = self.context.get_builtin_function("fmod");
+                return self.context.new_call(self.location, fmod, &[a, b]);
+            }
+            TypeKind::FP128 => {
+                let fmodl = self.context.get_builtin_function("fmodl");
+                return self.context.new_call(self.location, fmodl, &[a, b]);
+            }
+            _ => (),
+        }
+
+        if let Some(vector_type) = a_type_unqualified.dyncast_vector() {
+            assert_eq!(a_type_unqualified, b.get_type().unqualified());
+
+            let num_units = vector_type.get_num_units();
+            let new_elements: Vec<_> = (0..num_units)
+                .map(|i| {
+                    let index = self.context.new_rvalue_from_long(self.cx.type_u32(), i as _);
+                    let x = self.extract_element(a, index).to_rvalue();
+                    let y = self.extract_element(b, index).to_rvalue();
+                    self.frem(x, y)
+                })
+                .collect();
+
+            return self.context.new_rvalue_from_vector(self.location, a_type, &new_elements);
+        }
+        assert_eq!(a_type_unqualified, self.cx.double_type);
+
+        let fmod = self.context.get_builtin_function("fmod");
+        self.context.new_call(self.location, fmod, &[a, b])
+    }
+
+    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_shl(a, b)
+    }
+
+    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_lshr(a, b)
+    }
+
+    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check whether the behavior of `>>` is an arithmetic shift.
+        // It seems to be when the value is signed.
+        self.gcc_lshr(a, b)
+    }
+
+    fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_and(a, b)
+    }
+
+    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.gcc_or(a, b, self.location)
+    }
+
+    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_xor(a, b))
+    }
+
+    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_neg(a))
+    }
+
+    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(
+            self,
+            self.cx.context.new_unary_op(self.location, UnaryOp::Minus, a.get_type(), a),
+        )
+    }
+
+    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_not(a))
+    }
+
+    fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        set_rvalue_location(self, lhs + rhs)
+    }
+
+    fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        set_rvalue_location(self, lhs - rhs)
+    }
+
+    fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        set_rvalue_location(self, lhs * rhs)
+    }
+
+    fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        set_rvalue_location(self, lhs / rhs)
+    }
+
+    fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        let result = self.frem(lhs, rhs);
+        set_rvalue_location(self, result);
+        result
+    }
+
+    fn fadd_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs + rhs
+    }
+
+    fn fsub_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs - rhs
+    }
+
+    fn fmul_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs * rhs
+    }
+
+    fn fdiv_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs / rhs
+    }
+
+    fn frem_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        self.frem(lhs, rhs)
+    }
+
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        typ: Ty<'_>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value) {
+        self.gcc_checked_binop(oop, typ, lhs, rhs)
+    }
+
+    fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> {
+        let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
+        // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
+        self.current_func()
+            .new_local(self.location, ty, format!("stack_var_{}", self.next_value_counter()))
+            .get_address(self.location)
+    }
+
+    fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+        let block = self.llbb();
+        let function = block.get_function();
+        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
+        // the current basic block. Otherwise, it could be used in another basic block, causing a
+        // dereference after a drop, for instance.
+        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
+        // Ideally, we shouldn't need to do this check.
+        let aligned_type = if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
+            pointee_ty
+        } else {
+            pointee_ty.get_aligned(align.bytes())
+        };
+        let ptr = self.context.new_cast(self.location, ptr, aligned_type.make_pointer());
+        let deref = ptr.dereference(self.location).to_rvalue();
+        let loaded_value = function.new_local(
+            self.location,
+            aligned_type,
+            format!("loadedValue{}", self.next_value_counter()),
+        );
+        block.add_assignment(self.location, loaded_value, deref);
+        loaded_value.to_rvalue()
+    }
+
+    fn volatile_load(&mut self, ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+        let ptr = self.context.new_cast(self.location, ptr, ty.make_volatile().make_pointer());
+        ptr.dereference(self.location).to_rvalue()
+    }
+
+    fn atomic_load(
+        &mut self,
+        _ty: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> RValue<'gcc> {
+        // TODO(antoyo): use ty.
+        // TODO(antoyo): handle alignment.
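+        // GCC's builtin has the signature: T __atomic_load_N(T *ptr, int memorder).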
+        let atomic_load =
+            self.context.get_builtin_function(format!("__atomic_load_{}", size.bytes()));
+        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+        let volatile_const_void_ptr_type =
+            self.context.new_type::<()>().make_const().make_volatile().make_pointer();
+        let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type);
+        self.context.new_call(self.location, atomic_load, &[ptr, ordering])
+    }
+
+    fn load_operand(
+        &mut self,
+        place: PlaceRef<'tcx, RValue<'gcc>>,
+    ) -> OperandRef<'tcx, RValue<'gcc>> {
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
+
+        if place.layout.is_zst() {
+            return OperandRef::zero_sized(place.layout);
+        }
+
+        fn scalar_load_metadata<'a, 'gcc, 'tcx>(
+            bx: &mut Builder<'a, 'gcc, 'tcx>,
+            load: RValue<'gcc>,
+            scalar: &abi::Scalar,
+        ) {
+            let vr = scalar.valid_range(bx);
+            match scalar.primitive() {
+                abi::Primitive::Int(..) => {
+                    if !scalar.is_always_valid(bx) {
+                        bx.range_metadata(load, vr);
+                    }
+                }
+                abi::Primitive::Pointer(_) if vr.start < vr.end && !vr.contains(0) => {
+                    bx.nonnull_metadata(load);
+                }
+                _ => {}
+            }
+        }
+
+        let val = if place.val.llextra.is_some() {
+            // FIXME: Merge with the `else` below?
+            OperandValue::Ref(place.val)
+        } else if place.layout.is_gcc_immediate() {
+            let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
+            OperandValue::Immediate(
+                if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
+                    scalar_load_metadata(self, load, scalar);
+                    self.to_immediate_scalar(load, *scalar)
+                } else {
+                    load
+                },
+            )
+        } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr {
+            let b_offset = a.size(self).align_to(b.align(self).abi);
+
+            let mut load = |i, scalar: &abi::Scalar, align| {
+                let llptr = if i == 0 {
+                    place.val.llval
+                } else {
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
+                };
+                let llty = place.layout.scalar_pair_element_gcc_type(self, i);
+                let load = self.load(llty, llptr, align);
+                scalar_load_metadata(self, load, scalar);
+                if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+            };
+
+            OperandValue::Pair(
+                load(0, a, place.val.align),
+                load(1, b, place.val.align.restrict_for_offset(b_offset)),
+            )
+        } else {
+            OperandValue::Ref(place.val)
+        };
+
+        OperandRef { val, layout: place.layout }
+    }
+
+    fn write_operand_repeatedly(
+        &mut self,
+        cg_elem: OperandRef<'tcx, RValue<'gcc>>,
+        count: u64,
+        dest: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
+        let zero = self.const_usize(0);
+        let count = self.const_usize(count);
+        let start = dest.project_index(self, zero).val.llval;
+        let end = dest.project_index(self, count).val.llval;
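+        // A sketch of the loop emitted below, with `start`/`end` as computed above:
+        //     current = start;
+        //     while (current != end) { *current = cg_elem; current += 1; }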
+
+        let header_bb = self.append_sibling_block("repeat_loop_header");
+        let body_bb = self.append_sibling_block("repeat_loop_body");
+        let next_bb = self.append_sibling_block("repeat_loop_next");
+
+        let ptr_type = start.get_type();
+        let current = self.llbb().get_function().new_local(self.location, ptr_type, "loop_var");
+        let current_val = current.to_rvalue();
+        self.assign(current, start);
+
+        self.br(header_bb);
+
+        self.switch_to_block(header_bb);
+        let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
+        self.cond_br(keep_going, body_bb, next_bb);
+
+        self.switch_to_block(body_bb);
+        let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+
+        let next = self.inbounds_gep(
+            self.backend_type(cg_elem.layout),
+            current.to_rvalue(),
+            &[self.const_usize(1)],
+        );
+        self.llbb().add_assignment(self.location, current, next);
+        self.br(header_bb);
+
+        self.switch_to_block(next_bb);
+    }
+
+    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
+        // TODO(antoyo)
+    }
+
+    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+        self.store_with_flags(val, ptr, align, MemFlags::empty())
+    }
+
+    fn store_with_flags(
+        &mut self,
+        val: RValue<'gcc>,
+        ptr: RValue<'gcc>,
+        align: Align,
+        flags: MemFlags,
+    ) -> RValue<'gcc> {
+        let ptr = self.check_store(val, ptr);
+        let destination = ptr.dereference(self.location);
+        // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
+        // the pointer to a type with the proper alignment instead.
+        let destination_type = destination.to_rvalue().get_type().unqualified();
+        let align = if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() };
+        let mut modified_destination_type = destination_type.get_aligned(align);
+        if flags.contains(MemFlags::VOLATILE) {
+            modified_destination_type = modified_destination_type.make_volatile();
+        }
+
+        let modified_ptr =
+            self.cx.context.new_cast(self.location, ptr, modified_destination_type.make_pointer());
+        let modified_destination = modified_ptr.dereference(self.location);
+        self.llbb().add_assignment(self.location, modified_destination, val);
+        // TODO(antoyo): handle `MemFlags::NONTEMPORAL`.
+        // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
+        // When adding support for NONTEMPORAL, make sure to not just emit MOVNT on x86; see the
+        // LLVM backend for details.
+        self.cx.context.new_rvalue_zero(self.type_i32())
+    }
+
+    fn atomic_store(
+        &mut self,
+        value: RValue<'gcc>,
+        ptr: RValue<'gcc>,
+        order: AtomicOrdering,
+        size: Size,
+    ) {
+        // TODO(antoyo): handle alignment.
+        let atomic_store =
+            self.context.get_builtin_function(format!("__atomic_store_{}", size.bytes()));
+        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let volatile_const_void_ptr_type =
+            self.context.new_type::<()>().make_volatile().make_pointer();
+        let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type);
+
+        // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
+        // the following cast is required to avoid this error:
+        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int  __attribute__((aligned(4))))
+        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
+        let value = self.context.new_bitcast(self.location, value, int_type);
+        self.llbb().add_eval(
+            self.location,
+            self.context.new_call(self.location, atomic_store, &[ptr, value, ordering]),
+        );
+    }
+
+    fn gep(
+        &mut self,
+        typ: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        indices: &[RValue<'gcc>],
+    ) -> RValue<'gcc> {
+        // NOTE: due to opaque pointers now being used, we need to cast here.
+        let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
+        let ptr_type = ptr.get_type();
+        let mut pointee_type = ptr.get_type();
+        // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
+        // always considered in bounds in GCC (TODO(antoyo): to be verified).
+        // So, we have to cast the pointer to an integer and do the arithmetic manually.
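+        // For example, `gep(T, ptr, [i])` computes `(size_t)ptr + i * sizeof(T)` and
+        // casts the result back to the original pointer type.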
+        let mut result = self.context.new_bitcast(self.location, ptr, self.sizet_type);
+        // FIXME(antoyo): if there is more than one index, this code is probably wrong and would
+        // require dereferencing the pointer.
+        for index in indices {
+            pointee_type = pointee_type.get_pointee().expect("pointee type");
+            #[cfg(feature = "master")]
+            let pointee_size = {
+                let size = self.cx.context.new_sizeof(pointee_type);
+                self.context.new_cast(self.location, size, index.get_type())
+            };
+            #[cfg(not(feature = "master"))]
+            let pointee_size =
+                self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
+            result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type);
+        }
+        self.context.new_bitcast(self.location, result, ptr_type)
+    }
+
+    fn inbounds_gep(
+        &mut self,
+        typ: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        indices: &[RValue<'gcc>],
+    ) -> RValue<'gcc> {
+        // NOTE: due to opaque pointers now being used, we need to cast here.
+        let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
+        // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
+        let mut indices = indices.iter();
+        let index = indices.next().expect("first index in inbounds_gep");
+        let mut result = self.context.new_array_access(self.location, ptr, *index);
+        for index in indices {
+            result = self.context.new_array_access(self.location, result, *index);
+        }
+        result.get_address(self.location)
+    }
+
+    /* Casts */
+    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check that it indeed truncates the value.
+        self.gcc_int_cast(value, dest_ty)
+    }
+
+    fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check that it indeed sign extends the value.
+        if dest_ty.dyncast_vector().is_some() {
+            // TODO(antoyo): nothing to do as it is only for LLVM?
+            return value;
+        }
+        self.context.new_cast(self.location, value, dest_ty)
+    }
+
+    fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_float_to_uint_cast(value, dest_ty))
+    }
+
+    fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_float_to_int_cast(value, dest_ty))
+    }
+
+    fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_uint_to_float_cast(value, dest_ty))
+    }
+
+    fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.gcc_int_to_float_cast(value, dest_ty))
+    }
+
+    fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): make sure it truncates.
+        set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
+    }
+
+    fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
+    }
+
+    fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let usize_value = self.cx.context.new_cast(None, value, self.cx.type_isize());
+        self.intcast(usize_value, dest_ty, false)
+    }
+
+    fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let usize_value = self.intcast(value, self.cx.type_isize(), false);
+        self.cx.context.new_cast(None, usize_value, dest_ty)
+    }
+
+    fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.cx.const_bitcast(value, dest_ty)
+    }
+
+    fn intcast(
+        &mut self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+        _is_signed: bool,
+    ) -> RValue<'gcc> {
+        // NOTE: is_signed is for value, not dest_typ.
+        self.gcc_int_cast(value, dest_typ)
+    }
+
+    fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let val_type = value.get_type();
+        match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
+            (false, true) => {
+                // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
+                // a pointer, which is not supported by gccjit.
+                self.cx.context.new_cast(
+                    self.location,
+                    self.inttoptr(value, val_type.make_pointer()),
+                    dest_ty,
+                )
+            }
+            (false, false) => {
+                // When they are not pointers, we want a transmute (or reinterpret_cast).
+                self.bitcast(value, dest_ty)
+            }
+            (true, true) => self.cx.context.new_cast(self.location, value, dest_ty),
+            (true, false) => unimplemented!(),
+        }
+    }
+
+    /* Comparisons */
+    fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_icmp(op, lhs, rhs)
+    }
+
+    fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // LLVM has a concept of "unordered compares", where e.g. ULT returns true if the two
+        // arguments are unordered (i.e. either is NaN) or the lhs is less than the rhs. GCC does
+        // not natively have this concept, so in some cases we must handle NaNs manually.
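+        // For example, `ULT(a, b)` must yield true when either operand is NaN, but
+        // GCC's `<` yields false in that case, so we OR the comparison with explicit
+        // `x != x` NaN checks below.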
+        let must_handle_nan = match op {
+            RealPredicate::RealPredicateFalse => unreachable!(),
+            RealPredicate::RealOEQ => false,
+            RealPredicate::RealOGT => false,
+            RealPredicate::RealOGE => false,
+            RealPredicate::RealOLT => false,
+            RealPredicate::RealOLE => false,
+            RealPredicate::RealONE => false,
+            RealPredicate::RealORD => unreachable!(),
+            RealPredicate::RealUNO => unreachable!(),
+            RealPredicate::RealUEQ => false,
+            RealPredicate::RealUGT => true,
+            RealPredicate::RealUGE => true,
+            RealPredicate::RealULT => true,
+            RealPredicate::RealULE => true,
+            RealPredicate::RealUNE => false,
+            RealPredicate::RealPredicateTrue => unreachable!(),
+        };
+
+        let cmp = self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs);
+
+        if must_handle_nan {
+            let is_nan = self.context.new_binary_op(
+                self.location,
+                BinaryOp::LogicalOr,
+                self.cx.bool_type,
+                // compare a value to itself to check whether it is NaN
+                self.context.new_comparison(self.location, ComparisonOp::NotEquals, lhs, lhs),
+                self.context.new_comparison(self.location, ComparisonOp::NotEquals, rhs, rhs),
+            );
+
+            self.context.new_binary_op(
+                self.location,
+                BinaryOp::LogicalOr,
+                self.cx.bool_type,
+                is_nan,
+                cmp,
+            )
+        } else {
+            cmp
+        }
+    }
+
+    /* Miscellaneous instructions */
+    fn memcpy(
+        &mut self,
+        dst: RValue<'gcc>,
+        _dst_align: Align,
+        src: RValue<'gcc>,
+        _src_align: Align,
+        size: RValue<'gcc>,
+        flags: MemFlags,
+    ) {
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
+        let size = self.intcast(size, self.type_size_t(), false);
+        let _is_volatile = flags.contains(MemFlags::VOLATILE);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+        let memcpy = self.context.get_builtin_function("memcpy");
+        // TODO(antoyo): handle aligns and is_volatile.
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memcpy, &[dst, src, size]),
+        );
+    }
+
+    fn memmove(
+        &mut self,
+        dst: RValue<'gcc>,
+        _dst_align: Align,
+        src: RValue<'gcc>,
+        _src_align: Align,
+        size: RValue<'gcc>,
+        flags: MemFlags,
+    ) {
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
+        let size = self.intcast(size, self.type_size_t(), false);
+        let _is_volatile = flags.contains(MemFlags::VOLATILE);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+
+        let memmove = self.context.get_builtin_function("memmove");
+        // TODO(antoyo): handle is_volatile.
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memmove, &[dst, src, size]),
+        );
+    }
+
+    fn memset(
+        &mut self,
+        ptr: RValue<'gcc>,
+        fill_byte: RValue<'gcc>,
+        size: RValue<'gcc>,
+        _align: Align,
+        flags: MemFlags,
+    ) {
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memset not supported");
+        let _is_volatile = flags.contains(MemFlags::VOLATILE);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        let memset = self.context.get_builtin_function("memset");
+        // TODO(antoyo): handle align and is_volatile.
+        let fill_byte = self.context.new_cast(self.location, fill_byte, self.i32_type);
+        let size = self.intcast(size, self.type_size_t(), false);
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memset, &[ptr, fill_byte, size]),
+        );
+    }
+
+    fn select(
+        &mut self,
+        cond: RValue<'gcc>,
+        then_val: RValue<'gcc>,
+        mut else_val: RValue<'gcc>,
+    ) -> RValue<'gcc> {
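+        // Lower `select` to explicit control flow:
+        //     if (cond) { selectVar = then_val; } else { selectVar = else_val; }
+        // and continue in a fresh `after` block.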
+        let func = self.current_func();
+        let variable = func.new_local(self.location, then_val.get_type(), "selectVar");
+        let then_block = func.new_block("then");
+        let else_block = func.new_block("else");
+        let after_block = func.new_block("after");
+        self.llbb().end_with_conditional(self.location, cond, then_block, else_block);
+
+        then_block.add_assignment(self.location, variable, then_val);
+        then_block.end_with_jump(self.location, after_block);
+
+        if !then_val.get_type().is_compatible_with(else_val.get_type()) {
+            else_val = self.context.new_cast(self.location, else_val, then_val.get_type());
+        }
+        else_block.add_assignment(self.location, variable, else_val);
+        else_block.end_with_jump(self.location, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+        // state needs to be updated.
+        self.switch_to_block(after_block);
+
+        variable.to_rvalue()
+    }
+
+    #[allow(dead_code)]
+    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    #[cfg(feature = "master")]
+    fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
+        self.context.new_vector_access(self.location, vec, idx).to_rvalue()
+    }
+
+    #[cfg(not(feature = "master"))]
+    fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
+        let vector_type = vec
+            .get_type()
+            .unqualified()
+            .dyncast_vector()
+            .expect("Called extract_element on a non-vector type");
+        let element_type = vector_type.get_element_type();
+        let vec_num_units = vector_type.get_num_units();
+        let array_type =
+            self.context.new_array_type(self.location, element_type, vec_num_units as u64);
+        let array = self.context.new_bitcast(self.location, vec, array_type).to_rvalue();
+        self.context.new_array_access(self.location, array, idx).to_rvalue()
+    }
+
+    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value_type = aggregate_value.get_type();
+
+        if value_type.dyncast_array().is_some() {
+            let index = self
+                .context
+                .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(self.location, aggregate_value, index);
+            element.get_address(self.location)
+        } else if value_type.dyncast_vector().is_some() {
+            panic!();
+        } else if let Some(pointee_type) = value_type.get_pointee() {
+            if let Some(struct_type) = pointee_type.is_struct() {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                aggregate_value
+                    .dereference_field(self.location, struct_type.get_field(idx as i32))
+                    .to_rvalue()
+            } else {
+                panic!("Unexpected type {:?}", value_type);
+            }
+        } else if let Some(struct_type) = value_type.is_struct() {
+            aggregate_value
+                .access_field(self.location, struct_type.get_field(idx as i32))
+                .to_rvalue()
+        } else {
+            panic!("Unexpected type {:?}", value_type);
+        }
+    }
+
+    fn insert_value(
+        &mut self,
+        aggregate_value: RValue<'gcc>,
+        value: RValue<'gcc>,
+        idx: u64,
+    ) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value_type = aggregate_value.get_type();
+
+        let lvalue = if value_type.dyncast_array().is_some() {
+            let index = self
+                .context
+                .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            self.context.new_array_access(self.location, aggregate_value, index)
+        } else if value_type.dyncast_vector().is_some() {
+            panic!();
+        } else if let Some(pointee_type) = value_type.get_pointee() {
+            if let Some(struct_type) = pointee_type.is_struct() {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                aggregate_value.dereference_field(self.location, struct_type.get_field(idx as i32))
+            } else {
+                panic!("Unexpected type {:?}", value_type);
+            }
+        } else {
+            panic!("Unexpected type {:?}", value_type);
+        };
+
+        let lvalue_type = lvalue.to_rvalue().get_type();
+        let value =
+            // NOTE: sometimes, rustc will create a value with the wrong type.
+            if lvalue_type != value.get_type() {
+                self.context.new_cast(self.location, value, lvalue_type)
+            }
+            else {
+                value
+            };
+
+        self.llbb().add_assignment(self.location, lvalue, value);
+
+        aggregate_value
+    }
+
+    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
+        #[cfg(feature = "master")]
+        {
+            let personality = self.rvalue_as_function(_personality);
+            self.current_func().set_personality_function(personality);
+        }
+    }
+
+    #[cfg(feature = "master")]
+    fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
+        self.set_personality_fn(pers_fn);
+
+        // NOTE: record the current block so that a later call to `invoke` knows to
+        // generate a try/finally instead of a try/catch for this block.
+        self.cleanup_blocks.borrow_mut().insert(self.block);
+
+        let eh_pointer_builtin =
+            self.cx.context.get_target_builtin_function("__builtin_eh_pointer");
+        let zero = self.cx.context.new_rvalue_zero(self.int_type);
+        let ptr = self.cx.context.new_call(self.location, eh_pointer_builtin, &[zero]);
+
+        let value1_type = self.u8_type.make_pointer();
+        let ptr = self.cx.context.new_cast(self.location, ptr, value1_type);
+        let value1 = ptr;
+        let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?).
+
+        (value1, value2)
+    }
+
+    #[cfg(not(feature = "master"))]
+    fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
+        let value1 = self
+            .current_func()
+            .new_local(self.location, self.u8_type.make_pointer(), "landing_pad0")
+            .to_rvalue();
+        let value2 =
+            self.current_func().new_local(self.location, self.i32_type, "landing_pad1").to_rvalue();
+        (value1, value2)
+    }
+
+    fn filter_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
+        // TODO(antoyo): generate the correct landing pad
+        self.cleanup_landing_pad(pers_fn)
+    }
+
+    #[cfg(feature = "master")]
+    fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
+        let exn_type = exn0.get_type();
+        let exn = self.context.new_cast(self.location, exn0, exn_type);
+        let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume");
+        self.llbb()
+            .add_eval(self.location, self.context.new_call(self.location, unwind_resume, &[exn]));
+        self.unreachable();
+    }
+
+    #[cfg(not(feature = "master"))]
+    fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
+        self.unreachable();
+    }
+
+    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
+        unimplemented!();
+    }
+
+    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) {
+        unimplemented!();
+    }
+
+    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
+        unimplemented!();
+    }
+
+    fn catch_switch(
+        &mut self,
+        _parent: Option<RValue<'gcc>>,
+        _unwind: Option<Block<'gcc>>,
+        _handlers: &[Block<'gcc>],
+    ) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    // Atomic Operations
+    fn atomic_cmpxchg(
+        &mut self,
+        dst: RValue<'gcc>,
+        cmp: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> (RValue<'gcc>, RValue<'gcc>) {
+        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+        self.llbb().add_assignment(None, expected, cmp);
+        // NOTE: GCC doesn't support a failure memory model that is stronger than the success
+        // memory model.
+        let order = if failure_order as i32 > order as i32 { failure_order } else { order };
+        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
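+        // NOTE: GCC's `__atomic_compare_exchange` builtins write the value read from
+        // `dst` into `expected` on failure, while on success `expected` still holds
+        // `cmp`, i.e. the old value; so `expected` always ends up holding the previous
+        // value of `dst`, which is what this method must return.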
+
+        // NOTE: since `success` contains the call to the intrinsic, it must be added to the
+        // basic block before `expected`, so that we store `expected` after the call.
+        let success_var = self.current_func().new_local(self.location, self.bool_type, "success");
+        self.llbb().add_assignment(self.location, success_var, success);
+
+        (expected.to_rvalue(), success_var.to_rvalue())
+    }
+
+    fn atomic_rmw(
+        &mut self,
+        op: AtomicRmwBinOp,
+        dst: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+    ) -> RValue<'gcc> {
+        let size = get_maybe_pointer_size(src);
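+        // NOTE: GCC has no `__atomic_fetch_max`/`__atomic_fetch_min` builtins, so the
+        // min/max operations below are emulated via `atomic_extremum`.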
+        let name = match op {
+            AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+            AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+            AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+            AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+            AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+            AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+            AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+            AtomicRmwBinOp::AtomicMax => {
+                return self.atomic_extremum(ExtremumOperation::Max, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicMin => {
+                return self.atomic_extremum(ExtremumOperation::Min, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicUMax => {
+                return self.atomic_extremum(ExtremumOperation::Max, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicUMin => {
+                return self.atomic_extremum(ExtremumOperation::Min, dst, src, order);
+            }
+        };
+
+        let atomic_function = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
+        // FIXME(antoyo): not sure why, but we have the wrong type here.
+        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+        let src = self.context.new_bitcast(self.location, src, new_src_type);
+        let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
+        self.context.new_cast(self.location, res, src.get_type())
+    }
+
+    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+        let name = match scope {
+            SynchronizationScope::SingleThread => "__atomic_signal_fence",
+            SynchronizationScope::CrossThread => "__atomic_thread_fence",
+        };
+        let thread_fence = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        self.llbb()
+            .add_eval(self.location, self.context.new_call(self.location, thread_fence, &[order]));
+    }
+
+    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+        // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
+        self.normal_function_addresses.borrow_mut().insert(load);
+        // TODO(antoyo)
+    }
+
+    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO(antoyo)
+    }
+
+    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO(antoyo)
+    }
+
+    fn call(
+        &mut self,
+        typ: Type<'gcc>,
+        _fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        funclet: Option<&Funclet>,
+        _instance: Option<Instance<'tcx>>,
+    ) -> RValue<'gcc> {
+        // FIXME(antoyo): remove when having a proper API.
+        let gcc_func = unsafe { std::mem::transmute::<RValue<'gcc>, Function<'gcc>>(func) };
+        let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
+            self.function_call(func, args, funclet)
+        } else {
+            // If it's not a function that was defined, it's a function pointer.
+            self.function_ptr_call(typ, func, args, funclet)
+        };
+        if let Some(_fn_abi) = fn_abi {
+            // TODO(bjorn3): Apply function attributes
+        }
+        call
+    }
+
+    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): this does not zero-extend.
+        self.gcc_int_cast(value, dest_typ)
+    }
+
+    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
+        self.cx
+    }
+
+    fn apply_attrs_to_cleanup_callsite(&mut self, _llret: RValue<'gcc>) {
+        // FIXME(bjorn3): implement
+    }
+
+    fn set_span(&mut self, _span: Span) {}
+
+    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+        if self.cx().val_ty(val) == self.cx().type_i1() {
+            self.zext(val, self.cx().type_i8())
+        } else {
+            val
+        }
+    }
+
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+        if scalar.is_bool() {
+            return self.unchecked_utrunc(val, self.cx().type_i1());
+        }
+        val
+    }
+
+    fn fptoui_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.fptoint_sat(false, val, dest_ty)
+    }
+
+    fn fptosi_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.fptoint_sat(true, val, dest_ty)
+    }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    fn fptoint_sat(
+        &mut self,
+        signed: bool,
+        val: RValue<'gcc>,
+        dest_ty: Type<'gcc>,
+    ) -> RValue<'gcc> {
+        let src_ty = self.cx.val_ty(val);
+        let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
+            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+            (self.cx.element_type(src_ty), self.cx.element_type(dest_ty))
+        } else {
+            (src_ty, dest_ty)
+        };
+
+        // FIXME(jistone): the following was originally the fallback SSA implementation before LLVM 13
+        // added native `fptosi.sat` and `fptoui.sat` conversions, but it was used by GCC as well.
+        // Now that LLVM always relies on its own, the code has been moved to GCC, but the comments are
+        // still LLVM-specific. This should be updated to use GCC specifics where possible.
+
+        let int_width = self.cx.int_width(int_ty);
+        let float_width = self.cx.float_width(float_ty);
+        // LLVM's fpto[su]i returns undef when the input val is infinite, NaN, or does not fit into the
+        // destination integer type after rounding towards zero. This `undef` value can cause UB in
+        // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+        // Semantically, the mathematical value of the input is rounded towards zero to the next
+        // mathematical integer, and then the result is clamped into the range of the destination
+        // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+        // the destination integer type. NaN is mapped to 0.
+        //
+        // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
+        // a value representable in int_ty.
+        // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+        // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+        // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+        // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+        // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
+        // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+        // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+        // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+        // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
+        let int_max = |signed: bool, int_width: u64| -> u128 {
+            let shift_amount = 128 - int_width;
+            if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+        };
+        let int_min = |signed: bool, int_width: u64| -> i128 {
+            if signed { i128::MIN >> (128 - int_width) } else { 0 }
+        };
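+        // For example, for a signed 8-bit integer: int_max == i128::MAX as u128 >> 120 == 127
+        // and int_min == i128::MIN >> 120 == -128 (the arithmetic shift keeps the sign).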
+
+        let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+            let rounded_min =
+                ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+            assert_eq!(rounded_min.status, Status::OK);
+            let rounded_max =
+                ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+            assert!(rounded_max.value.is_finite());
+            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+        };
+        let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+            let rounded_min =
+                ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+            assert_eq!(rounded_min.status, Status::OK);
+            let rounded_max =
+                ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+            assert!(rounded_max.value.is_finite());
+            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+        };
+        // To implement saturation, we perform the following steps:
+        //
+        // 1. Cast val to an integer with fpto[su]i. This may result in undef.
+        // 2. Compare val to f_min and f_max, and use the comparison results to select:
+        //  a) int_ty::MIN if val < f_min or val is NaN
+        //  b) int_ty::MAX if val > f_max
+        //  c) the result of fpto[su]i otherwise
+        // 3. If val is NaN, return 0, otherwise return the result of step 2.
+        //
+        // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the
+        // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
+        // undef does not introduce any non-determinism either.
+        // More importantly, the above procedure correctly implements saturating conversion.
+        // Proof (sketch):
+        // If val is NaN, 0 is returned by definition.
+        // Otherwise, val is finite or infinite and thus can be compared with f_min and f_max.
+        // This yields three cases to consider:
+        // (1) if val in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+        //     saturating conversion for inputs in that range.
+        // (2) if val > f_max, then val is larger than int_ty::MAX. This holds even if f_max is rounded
+        //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+        //     than int_ty::MAX. Because val is larger than int_ty::MAX, the return value of int_ty::MAX
+        //     is correct.
+        // (3) if val < f_min, then val is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+        //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+        // QED.
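+        //
+        // For example, for an f32 -> i8 conversion: val = 300.0 compares greater than
+        // f_max (127.0), so step 2 selects int_ty::MAX == 127; val = NaN makes the
+        // unordered %less_or_nan true, so step 2 selects int_ty::MIN == -128, which
+        // step 3 then replaces with 0.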
+
+        let float_bits_to_llval = |bx: &mut Self, bits| {
+            let bits_llval = match float_width {
+                32 => bx.cx().const_u32(bits as u32),
+                64 => bx.cx().const_u64(bits as u64),
+                n => bug!("unsupported float width {}", n),
+            };
+            bx.bitcast(bits_llval, float_ty)
+        };
+        let (f_min, f_max) = match float_width {
+            32 => compute_clamp_bounds_single(signed, int_width),
+            64 => compute_clamp_bounds_double(signed, int_width),
+            n => bug!("unsupported float width {}", n),
+        };
+        let f_min = float_bits_to_llval(self, f_min);
+        let f_max = float_bits_to_llval(self, f_max);
+        let int_max = self.cx.const_uint_big(int_ty, int_max(signed, int_width));
+        let int_min = self.cx.const_uint_big(int_ty, int_min(signed, int_width) as u128);
+        let zero = self.cx.const_uint(int_ty, 0);
+
+        // If we're working with vectors, constants must be "splatted": the constant is duplicated
+        // into each lane of the vector.  The algorithm stays the same, we are just using the
+        // same constant across all lanes.
+        let maybe_splat = |bx: &mut Self, val| {
+            if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
+                bx.vector_splat(bx.vector_length(dest_ty), val)
+            } else {
+                val
+            }
+        };
+        let f_min = maybe_splat(self, f_min);
+        let f_max = maybe_splat(self, f_max);
+        let int_max = maybe_splat(self, int_max);
+        let int_min = maybe_splat(self, int_min);
+        let zero = maybe_splat(self, zero);
+
+        // Step 1 ...
+        let fptosui_result =
+            if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
+        let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min);
+        let greater = self.fcmp(RealPredicate::RealOGT, val, f_max);
+
+        // Step 2: We use two comparisons and two selects, with %s1 being the
+        // result:
+        //     %less_or_nan = fcmp ult %val, %f_min
+        //     %greater = fcmp ogt %val, %f_max
+        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+        //     %s1 = select %greater, int_ty::MAX, %s0
+        // Note that %less_or_nan uses an *unordered* comparison. This
+        // comparison is true if the operands are not comparable (i.e., if val is
+        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+        // val is NaN.
+        //
+        // Performance note: Unordered comparison can be lowered to a "flipped"
+        // comparison and a negation, and the negation can be merged into the
+        // select. Therefore, it is not necessarily any more expensive than an
+        // ordered ("normal") comparison. Whether these optimizations will be
+        // performed is ultimately up to the backend, but at least x86 does
+        // perform them.
+        let s0 = self.select(less_or_nan, int_min, fptosui_result);
+        let s1 = self.select(greater, int_max, s0);
+
+        // Step 3: NaN replacement.
+        // For unsigned types, the above step already yielded int_ty::MIN == 0 if val is NaN.
+        // Therefore we only need to execute this step for signed integer types.
+        if signed {
+            // LLVM has no isNaN predicate, so we use (val == val) instead
+            let cmp = self.fcmp(RealPredicate::RealOEQ, val, val);
+            self.select(cmp, s1, zero)
+        } else {
+            s1
+        }
+    }
+
+    #[cfg(feature = "master")]
+    pub fn shuffle_vector(
+        &mut self,
+        v1: RValue<'gcc>,
+        v2: RValue<'gcc>,
+        mask: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        // NOTE: if the `mask` is a constant value, the following code will copy it in many places,
+        // which can make GCC create a huge number (over 4000) of local variables in some cases.
+        // So we assign it to an explicit local variable once to avoid this.
+        let func = self.current_func();
+        let mask_var = func.new_local(self.location, mask.get_type(), "mask");
+        let block = self.block;
+        block.add_assignment(self.location, mask_var, mask);
+        let mask = mask_var.to_rvalue();
+
+        // TODO(antoyo): use a recursive unqualified() here.
+        let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type");
+        let element_type = vector_type.get_element_type();
+        let vec_num_units = vector_type.get_num_units();
+
+        let mask_element_type = if element_type.is_integral() {
+            element_type
+        } else {
+            #[cfg(feature = "master")]
+            {
+                self.cx.type_ix(element_type.get_size() as u64 * 8)
+            }
+            #[cfg(not(feature = "master"))]
+            self.int_type
+        };
+
+        // NOTE: this condition is needed because we call shuffle_vector in the implementation of
+        // simd_gather.
+        let mut mask_elements = if let Some(vector_type) = mask.get_type().dyncast_vector() {
+            let mask_num_units = vector_type.get_num_units();
+            let mut mask_elements = vec![];
+            for i in 0..mask_num_units {
+                let index = self.context.new_rvalue_from_long(self.cx.type_u32(), i as _);
+                mask_elements.push(self.context.new_cast(
+                    self.location,
+                    self.extract_element(mask, index).to_rvalue(),
+                    mask_element_type,
+                ));
+            }
+            mask_elements
+        } else {
+            let struct_type = mask.get_type().is_struct().expect("mask should be of struct type");
+            let mask_num_units = struct_type.get_field_count();
+            let mut mask_elements = vec![];
+            for i in 0..mask_num_units {
+                let field = struct_type.get_field(i as i32);
+                mask_elements.push(self.context.new_cast(
+                    self.location,
+                    mask.access_field(self.location, field).to_rvalue(),
+                    mask_element_type,
+                ));
+            }
+            mask_elements
+        };
+        let mask_num_units = mask_elements.len();
+
+        // NOTE: the mask needs to be the same length as the input vectors, so add the missing
+        // elements in the mask if needed.
+        for _ in mask_num_units..vec_num_units {
+            mask_elements.push(self.context.new_rvalue_zero(mask_element_type));
+        }
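+        // For example, shuffling two 4-element vectors with a 2-element mask pads the
+        // mask to [m0, m1, 0, 0]; the two extra lanes of the permutation result are
+        // dropped again below.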
+
+        let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
+        let (v1, v2) = if vec_num_units < mask_num_units {
+            // NOTE: the mask needs to be the same length as the input vectors, so join the 2
+            // vectors and create a dummy second vector.
+            let mut elements = vec![];
+            for i in 0..vec_num_units {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            v1,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
+            }
+            for i in 0..(mask_num_units - vec_num_units) {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            v2,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
+            }
+            let v1 = self.context.new_rvalue_from_vector(self.location, result_type, &elements);
+            let zero = self.context.new_rvalue_zero(element_type);
+            let v2 = self.context.new_rvalue_from_vector(
+                self.location,
+                result_type,
+                &vec![zero; mask_num_units],
+            );
+            (v1, v2)
+        } else {
+            (v1, v2)
+        };
+
+        let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
+        let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
+        let mask = self.context.new_rvalue_from_vector(self.location, mask_type, &mask_elements);
+        let result = self.context.new_rvalue_vector_perm(self.location, v1, v2, mask);
+
+        if vec_num_units != mask_num_units {
+            // NOTE: if padding was added, only select the number of elements of the masks to
+            // remove that padding in the result.
+            let mut elements = vec![];
+            for i in 0..mask_num_units {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            result,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
+            }
+            self.context.new_rvalue_from_vector(self.location, result_type, &elements)
+        } else {
+            result
+        }
+    }
+
+    #[cfg(not(feature = "master"))]
+    pub fn shuffle_vector(
+        &mut self,
+        _v1: RValue<'gcc>,
+        _v2: RValue<'gcc>,
+        _mask: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    #[cfg(feature = "master")]
+    pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
+    where
+        F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>,
+    {
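+        // Reduce in log2(n) steps: at each step, rotate the lanes by `shift` with a
+        // vector permutation and combine lane-wise with `op`.
+        // For example, for [a, b, c, d] with op == +:
+        //     shift == 1: res = [a+b, b+c, c+d, d+a]
+        //     shift == 2: res = [a+b+c+d, ...], and lane 0 holds the full reduction.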
+        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+        let element_type = vector_type.get_element_type();
+        let mask_element_type = self.type_ix(element_type.get_size() as u64 * 8);
+        let element_count = vector_type.get_num_units();
+        let mut vector_elements = vec![];
+        for i in 0..element_count {
+            vector_elements.push(i);
+        }
+        let mask_type = self.context.new_vector_type(mask_element_type, element_count as u64);
+        let mut shift = 1;
+        let mut res = src;
+        while shift < element_count {
+            let vector_elements: Vec<_> = vector_elements
+                .iter()
+                .map(|i| {
+                    self.context.new_rvalue_from_int(
+                        mask_element_type,
+                        ((i + shift) % element_count) as i32,
+                    )
+                })
+                .collect();
+            let mask =
+                self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
+            let shifted = self.context.new_rvalue_vector_perm(self.location, res, res, mask);
+            shift *= 2;
+            res = op(res, shifted, self.context);
+        }
+        self.context
+            .new_vector_access(self.location, res, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue()
+    }
+
+    #[cfg(not(feature = "master"))]
+    pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc>
+    where
+        F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>,
+    {
+        unimplemented!();
+    }
+
+    pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
+        let loc = self.location;
+        self.vector_reduce(src, |a, b, context| context.new_binary_op(loc, op, a.get_type(), a, b))
+    }
+
+    pub fn vector_reduce_fadd_reassoc(
+        &mut self,
+        _acc: RValue<'gcc>,
+        _src: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    #[cfg(feature = "master")]
+    pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
+        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+        let element_count = vector_type.get_num_units();
+        (0..element_count)
+            .map(|i| {
+                self.context
+                    .new_vector_access(
+                        self.location,
+                        src,
+                        self.context.new_rvalue_from_int(self.int_type, i as _),
+                    )
+                    .to_rvalue()
+            })
+            .fold(acc, |x, i| x + i)
+    }
+
+    #[cfg(not(feature = "master"))]
+    pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    pub fn vector_reduce_fmul_reassoc(
+        &mut self,
+        _acc: RValue<'gcc>,
+        _src: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    #[cfg(feature = "master")]
+    pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
+        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+        let element_count = vector_type.get_num_units();
+        (0..element_count)
+            .map(|i| {
+                self.context
+                    .new_vector_access(
+                        self.location,
+                        src,
+                        self.context.new_rvalue_from_int(self.int_type, i as _),
+                    )
+                    .to_rvalue()
+            })
+            .fold(acc, |x, i| x * i)
+    }
+
+    #[cfg(not(feature = "master"))]
+    pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!()
+    }
+
+    // Inspired by Hacker's Delight min implementation.
+    pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let loc = self.location;
+        self.vector_reduce(src, |a, b, context| {
+            let differences_or_zeros = difference_or_zero(loc, a, b, context);
+            context.new_binary_op(loc, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
+        })
+    }
+
+    // Inspired by Hacker's Delight max implementation.
+    pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let loc = self.location;
+        self.vector_reduce(src, |a, b, context| {
+            let differences_or_zeros = difference_or_zero(loc, a, b, context);
+            context.new_binary_op(loc, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
+        })
+    }
+
+    fn vector_extremum(
+        &mut self,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+        direction: ExtremumOperation,
+    ) -> RValue<'gcc> {
+        let vector_type = a.get_type();
+
+        // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and
+        // b get compared & spliced together, we get the numeric values instead of NaNs.
+        let b_nan_mask = self.context.new_comparison(self.location, ComparisonOp::NotEquals, b, b);
+        let mask_type = b_nan_mask.get_type();
+        let b_nan_mask_inverted =
+            self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, mask_type, b_nan_mask);
+        let a_cast = self.context.new_bitcast(self.location, a, mask_type);
+        let b_cast = self.context.new_bitcast(self.location, b, mask_type);
+        let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast);
+        let b = self.context.new_bitcast(self.location, res, vector_type);
+
+        // now do the actual comparison
+        let comparison_op = match direction {
+            ExtremumOperation::Min => ComparisonOp::LessThan,
+            ExtremumOperation::Max => ComparisonOp::GreaterThan,
+        };
+        let cmp = self.context.new_comparison(self.location, comparison_op, a, b);
+        let cmp_inverted =
+            self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, cmp.get_type(), cmp);
+        let res = (cmp & a_cast) | (cmp_inverted & res);
+        self.context.new_bitcast(self.location, res, vector_type)
+    }
+
+    pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.vector_extremum(a, b, ExtremumOperation::Min)
+    }
+
+    #[cfg(feature = "master")]
+    pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+        let element_count = vector_type.get_num_units();
+        let mut acc = self
+            .context
+            .new_vector_access(self.location, src, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue();
+        for i in 1..element_count {
+            let elem = self
+                .context
+                .new_vector_access(
+                    self.location,
+                    src,
+                    self.context.new_rvalue_from_int(self.int_type, i as _),
+                )
+                .to_rvalue();
+            let cmp = self.context.new_comparison(self.location, ComparisonOp::LessThan, acc, elem);
+            acc = self.select(cmp, acc, elem);
+        }
+        acc
+    }
+
+    #[cfg(not(feature = "master"))]
+    pub fn vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    pub fn vector_fmax(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.vector_extremum(a, b, ExtremumOperation::Max)
+    }
+
+    #[cfg(feature = "master")]
+    pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+        let element_count = vector_type.get_num_units();
+        let mut acc = self
+            .context
+            .new_vector_access(self.location, src, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue();
+        for i in 1..element_count {
+            let elem = self
+                .context
+                .new_vector_access(
+                    self.location,
+                    src,
+                    self.context.new_rvalue_from_int(self.int_type, i as _),
+                )
+                .to_rvalue();
+            let cmp =
+                self.context.new_comparison(self.location, ComparisonOp::GreaterThan, acc, elem);
+            acc = self.select(cmp, acc, elem);
+        }
+        acc
+    }
+
+    #[cfg(not(feature = "master"))]
+    pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    pub fn vector_select(
+        &mut self,
+        cond: RValue<'gcc>,
+        then_val: RValue<'gcc>,
+        else_val: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        // cond is a vector of integers, not of bools.
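+        // We lower the select to bitwise operations: comparing `cond` to zero yields
+        // all-ones/all-zeros lanes, so the result is `(masks & then_val) | (!masks & else_val)`
+        // computed lane-wise.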
+        let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type");
+        let num_units = vector_type.get_num_units();
+        let element_type = vector_type.get_element_type();
+
+        #[cfg(feature = "master")]
+        let (cond, element_type) = {
+            // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+            let then_val_vector_type =
+                then_val.get_type().unqualified().dyncast_vector().expect("vector type");
+            let then_val_element_type = then_val_vector_type.get_element_type();
+            let then_val_element_size = then_val_element_type.get_size();
+
+            // NOTE: the mask needs to be of the same size as the other arguments in order for the &
+            // operation to work.
+            if then_val_element_size != element_type.get_size() {
+                let new_element_type = self.type_ix(then_val_element_size as u64 * 8);
+                let new_vector_type =
+                    self.context.new_vector_type(new_element_type, num_units as u64);
+                let cond = self.context.convert_vector(self.location, cond, new_vector_type);
+                (cond, new_element_type)
+            } else {
+                (cond, element_type)
+            }
+        };
+
+        let cond_type = cond.get_type();
+
+        let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
+        let zeros = self.context.new_rvalue_from_vector(self.location, cond_type, &zeros);
+
+        let result_type = then_val.get_type();
+
+        let masks =
+            self.context.new_comparison(self.location, ComparisonOp::NotEquals, cond, zeros);
+        // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
+        // the & operation work.
+        let then_val = self.bitcast_if_needed(then_val, masks.get_type());
+        let then_vals = masks & then_val;
+
+        let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units];
+        let minus_ones = self.context.new_rvalue_from_vector(self.location, cond_type, &minus_ones);
+        let inverted_masks = masks ^ minus_ones;
+        // NOTE: sometimes, the type of else_val can be different from the type of then_val in
+        // libgccjit (vector of int vs vector of int32_t), but they must be the same for the AND
+        // operation to work.
+        // TODO: remove bitcast now that vector types can be compared?
+        let else_val = self.context.new_bitcast(self.location, else_val, then_val.get_type());
+        let else_vals = inverted_masks & else_val;
+
+        let res = then_vals | else_vals;
+        self.bitcast_if_needed(res, result_type)
+    }
+}
+
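+/// Computes, per lane, `a - b` when `b >= a` and `0` otherwise, i.e. `min(a - b, 0)`.
+/// This relies on GCC vector comparisons producing all-ones (-1) in true lanes, so the
+/// final `&` either keeps or zeroes a whole lane. Callers can then derive branchless
+/// extrema: `b + difference_or_zero(a, b)` is the lane-wise minimum of `a` and `b`, and
+/// `a - difference_or_zero(a, b)` is the lane-wise maximum.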
+fn difference_or_zero<'gcc>(
+    loc: Option<Location<'gcc>>,
+    a: RValue<'gcc>,
+    b: RValue<'gcc>,
+    context: &'gcc Context<'gcc>,
+) -> RValue<'gcc> {
+    let difference = a - b;
+    let masks = context.new_comparison(loc, ComparisonOp::GreaterThanEquals, b, a);
+    // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
+    // the & operation work.
+    let a_type = a.get_type();
+    let masks =
+        if masks.get_type() != a_type { context.new_bitcast(loc, masks, a_type) } else { masks };
+    difference & masks
+}
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+        // Forward to the `get_static` method of `CodegenCx`
+        self.cx().get_static(def_id).get_address(self.location)
+    }
+}
+
+impl<'tcx> HasTypingEnv<'tcx> for Builder<'_, '_, 'tcx> {
+    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
+        self.cx.typing_env()
+    }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        self.cx.target_spec()
+    }
+}
+
+impl<'tcx> HasWasmCAbiOpt for Builder<'_, '_, 'tcx> {
+    fn wasm_c_abi_opt(&self) -> WasmCAbi {
+        self.cx.wasm_c_abi_opt()
+    }
+}
+
+impl<'tcx> HasX86AbiOpt for Builder<'_, '_, 'tcx> {
+    fn x86_abi_opt(&self) -> X86Abi {
+        self.cx.x86_abi_opt()
+    }
+}
+
+pub trait ToGccComp {
+    fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
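+// NOTE: unlike LLVM, which encodes signedness in the comparison predicate, GCC encodes it
+// in the operand types, so the signed and unsigned variants map to the same `ComparisonOp`:
+// comparing two unsigned values already emits an unsigned comparison.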
+impl ToGccComp for IntPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        match *self {
+            IntPredicate::IntEQ => ComparisonOp::Equals,
+            IntPredicate::IntNE => ComparisonOp::NotEquals,
+            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntULT => ComparisonOp::LessThan,
+            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntSLT => ComparisonOp::LessThan,
+            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
+        }
+    }
+}
+
+impl ToGccComp for RealPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        // TODO(antoyo): check that ordered vs non-ordered is respected.
+        match *self {
+            RealPredicate::RealPredicateFalse => unreachable!(),
+            RealPredicate::RealOEQ => ComparisonOp::Equals,
+            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
+            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
+            RealPredicate::RealOLT => ComparisonOp::LessThan,
+            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
+            RealPredicate::RealONE => ComparisonOp::NotEquals,
+            RealPredicate::RealORD => unreachable!(),
+            RealPredicate::RealUNO => unreachable!(),
+            RealPredicate::RealUEQ => ComparisonOp::Equals,
+            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
+            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
+            RealPredicate::RealULT => ComparisonOp::LessThan,
+            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
+            RealPredicate::RealUNE => ComparisonOp::NotEquals,
+            RealPredicate::RealPredicateTrue => unreachable!(),
+        }
+    }
+}
+
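+// The discriminants mirror GCC's `__ATOMIC_RELAXED`..`__ATOMIC_SEQ_CST` memory order
+// constants (0 through 5), which is the `memorder` argument expected by the `__atomic_*`
+// builtins.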
+#[repr(C)]
+#[allow(non_camel_case_types)]
+enum MemOrdering {
+    __ATOMIC_RELAXED,
+    __ATOMIC_CONSUME,
+    __ATOMIC_ACQUIRE,
+    __ATOMIC_RELEASE,
+    __ATOMIC_ACQ_REL,
+    __ATOMIC_SEQ_CST,
+}
+
+trait ToGccOrdering {
+    fn to_gcc(self) -> i32;
+}
+
+impl ToGccOrdering for AtomicOrdering {
+    fn to_gcc(self) -> i32 {
+        use MemOrdering::*;
+
+        let ordering = match self {
+            AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+            AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+            AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+            AtomicOrdering::Release => __ATOMIC_RELEASE,
+            AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+            AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+        };
+        ordering as i32
+    }
+}
+
+// Needed because gcc 12's `get_size()` doesn't work on pointers.
+#[cfg(feature = "master")]
+fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
+    value.get_type().get_size()
+}
+
+#[cfg(not(feature = "master"))]
+fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
+    let type_ = value.get_type();
+    if type_.get_pointee().is_some() { size_of::<*const ()>() as _ } else { type_.get_size() }
+}
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
new file mode 100644
index 00000000000..c133ae4fcdd
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -0,0 +1,151 @@
+#[cfg(feature = "master")]
+use gccjit::{FnAttribute, Visibility};
+use gccjit::{Function, FunctionType};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
+
+use crate::attributes;
+use crate::context::CodegenCx;
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> {
+    let tcx = cx.tcx();
+
+    assert!(!instance.args.has_infer());
+    assert!(!instance.args.has_escaping_bound_vars());
+
+    let sym = tcx.symbol_name(instance).name;
+
+    if let Some(&func) = cx.function_instances.borrow().get(&instance) {
+        return func;
+    }
+
+    let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+    let func = if let Some(_func) = cx.get_declared_value(sym) {
+        // FIXME(antoyo): we never reach this because get_declared_value only returns global variables
+        // and here we try to get a function.
+        unreachable!();
+        /*
+        // Create a fn pointer with the new signature.
+        let ptrty = fn_abi.ptr_to_gcc_type(cx);
+
+        // This is subtle and surprising, but sometimes we have to bitcast
+        // the resulting fn pointer.  The reason has to do with external
+        // functions.  If you have two crates that both bind the same C
+        // library, they may not use precisely the same types: for
+        // example, they will probably each declare their own structs,
+        // which are distinct types from LLVM's point of view (nominal
+        // types).
+        //
+        // Now, if those two crates are linked into an application, and
+        // they contain inlined code, you can wind up with a situation
+        // where both of those functions wind up being loaded into this
+        // application simultaneously. In that case, the same function
+        // (from LLVM's point of view) requires two types. But of course
+        // LLVM won't allow one function to have two types.
+        //
+        // What we currently do, therefore, is declare the function with
+        // one of the two types (whichever happens to come first) and then
+        // bitcast as needed when the function is referenced to make sure
+        // it has the type we expect.
+        //
+        // This can occur on either a crate-local or crate-external
+        // reference. It also occurs when testing libcore and in some
+        // other weird situations. Annoying.
+        if cx.val_ty(func) != ptrty {
+            // TODO(antoyo): cast the pointer.
+            func
+        }
+        else {
+            func
+        }*/
+    } else {
+        cx.linkage.set(FunctionType::Extern);
+        let func = cx.declare_fn(sym, fn_abi);
+
+        attributes::from_fn_attrs(cx, func, instance);
+
+        #[cfg(feature = "master")]
+        {
+            let instance_def_id = instance.def_id();
+
+            // TODO(antoyo): set linkage and attributes.
+
+            // Apply an appropriate linkage/visibility value to our item that we
+            // just declared.
+            //
+            // This is sort of subtle. Inside our codegen unit we started off
+            // compilation by predefining all our own `MonoItem` instances. That
+            // is, everything we're codegenning ourselves is already defined. That
+            // means that anything we're actually codegenning in this codegen unit
+            // will have hit the above branch in `get_declared_value`. As a result,
+            // we're guaranteed here that we're declaring a symbol that won't get
+            // defined, or in other words we're referencing a value from another
+            // codegen unit or even another crate.
+            //
+            // So because this is a foreign value we blanket apply an external
+            // linkage directive because it's coming from a different object file.
+            // The visibility here is where it gets tricky. This symbol could be
+            // referencing some foreign crate or foreign library (an `extern`
+            // block) in which case we want to leave the default visibility. We may
+            // also, though, have multiple codegen units. It could be a
+            // monomorphization, in which case its expected visibility depends on
+            // whether we are sharing generics or not. The important thing here is
+            // that the visibility we apply to the declaration is the same one that
+            // has been applied to the definition (wherever that definition may be).
+            let is_generic = instance.args.non_erasable_generics().next().is_some();
+
+            let is_hidden = if is_generic {
+                // This is a monomorphization of a generic function.
+                if !(cx.tcx.sess.opts.share_generics()
+                    || tcx.codegen_fn_attrs(instance_def_id).inline
+                        == rustc_attr_parsing::InlineAttr::Never)
+                {
+                    // When not sharing generics, all instances are in the same
+                    // crate and have hidden visibility.
+                    true
+                } else if let Some(instance_def_id) = instance_def_id.as_local() {
+                    // This is a monomorphization of a generic function
+                    // defined in the current crate. It is hidden if:
+                    // - the definition is unreachable for downstream
+                    //   crates, or
+                    // - the current crate does not re-export generics
+                    //   (because the crate is a C library or executable)
+                    cx.tcx.is_unreachable_local_definition(instance_def_id)
+                        || !cx.tcx.local_crate_exports_generics()
+                } else {
+                    // This is a monomorphization of a generic function
+                    // defined in an upstream crate. It is hidden if:
+                    // - it is instantiated in this crate, and
+                    // - the current crate does not re-export generics
+                    instance.upstream_monomorphization(tcx).is_none()
+                        && !cx.tcx.local_crate_exports_generics()
+                }
+            } else {
+                // This is a non-generic function. It is hidden if:
+                // - it is instantiated in the local crate, and
+            //   - it is defined in an upstream crate (non-local), or
+                //   - it is not reachable
+                cx.tcx.is_codegened_item(instance_def_id)
+                    && (!instance_def_id.is_local()
+                        || !cx.tcx.is_reachable_non_generic(instance_def_id))
+            };
+            if is_hidden {
+                func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+            }
+        }
+
+        func
+    };
+
+    cx.function_instances.borrow_mut().insert(instance, func);
+
+    func
+}
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
new file mode 100644
index 00000000000..a63da6b6e27
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -0,0 +1,463 @@
+use gccjit::{LValue, RValue, ToRValue, Type};
+use rustc_abi as abi;
+use rustc_abi::HasDataLayout;
+use rustc_abi::Primitive::Pointer;
+use rustc_codegen_ssa::traits::{
+    BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
+};
+use rustc_middle::mir::Mutability;
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::ty::layout::LayoutOf;
+
+use crate::consts::const_alloc_to_gcc;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, val, ty)
+    }
+
+    pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
+        bytes_in_context(self, bytes)
+    }
+
+    fn global_string(&self, string: &str) -> LValue<'gcc> {
+        // TODO(antoyo): handle non-null-terminated strings.
+        let string = self.context.new_string_literal(string);
+        let sym = self.generate_local_symbol_name("str");
+        let global = self.declare_private_global(&sym, self.val_ty(string));
+        global.global_set_initializer_rvalue(string);
+        global
+        // TODO(antoyo): set linkage.
+    }
+
+    pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
+        if value.get_type() == self.bool_type.make_pointer() {
+            if let Some(pointee) = typ.get_pointee() {
+                if pointee.dyncast_vector().is_some() {
+                    panic!()
+                }
+            }
+        }
+        // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some
+        // SIMD builtins require a constant value.
+        self.bitcast_if_needed(value, typ)
+    }
+}
+
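+/// Emits a constant array of `u8` values in the given context; for example, `&[1, 2]`
+/// becomes roughly the GCC equivalent of `unsigned char data[2] = {1, 2};`.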
+pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
+    let context = &cx.context;
+    let byte_type = context.new_type::<u8>();
+    let typ = context.new_array_type(None, byte_type, bytes.len() as u64);
+    let elements: Vec<_> =
+        bytes.iter().map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)).collect();
+    context.new_array_constructor(None, typ, &elements)
+}
+
+pub fn type_is_pointer(typ: Type<'_>) -> bool {
+    typ.get_pointee().is_some()
+}
+
+impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
+    fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        if type_is_pointer(typ) { self.context.new_null(typ) } else { self.const_int(typ, 0) }
+    }
+
+    fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        let local = self.current_func.borrow().expect("func").new_local(None, typ, "undefined");
+        if typ.is_struct().is_some() {
+            // NOTE: hack to work around a limitation of the rustc API: see comment on
+            // CodegenCx.structs_as_pointer
+            let pointer = local.get_address(None);
+            self.structs_as_pointer.borrow_mut().insert(pointer);
+            pointer
+        } else {
+            local.to_rvalue()
+        }
+    }
+
+    fn const_poison(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        // No distinction between undef and poison.
+        self.const_undef(typ)
+    }
+
+    fn const_bool(&self, val: bool) -> RValue<'gcc> {
+        self.const_uint(self.type_i1(), val as u64)
+    }
+
+    fn const_i8(&self, i: i8) -> RValue<'gcc> {
+        self.const_int(self.type_i8(), i as i64)
+    }
+
+    fn const_i16(&self, i: i16) -> RValue<'gcc> {
+        self.const_int(self.type_i16(), i as i64)
+    }
+
+    fn const_i32(&self, i: i32) -> RValue<'gcc> {
+        self.const_int(self.type_i32(), i as i64)
+    }
+
+    fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+        self.gcc_int(typ, int)
+    }
+
+    fn const_u8(&self, i: u8) -> RValue<'gcc> {
+        self.const_uint(self.type_u8(), i as u64)
+    }
+
+    fn const_u32(&self, i: u32) -> RValue<'gcc> {
+        self.const_uint(self.type_u32(), i as u64)
+    }
+
+    fn const_u64(&self, i: u64) -> RValue<'gcc> {
+        self.const_uint(self.type_u64(), i)
+    }
+
+    fn const_u128(&self, i: u128) -> RValue<'gcc> {
+        self.const_uint_big(self.type_u128(), i)
+    }
+
+    fn const_usize(&self, i: u64) -> RValue<'gcc> {
+        let bit_size = self.data_layout().pointer_size.bits();
+        if bit_size < 64 {
+            // make sure it doesn't overflow
+            assert!(i < (1 << bit_size));
+        }
+
+        self.const_uint(self.usize_type, i)
+    }
+
+    fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+        self.gcc_uint(typ, int)
+    }
+
+    fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+        self.gcc_uint_big(typ, num)
+    }
+
+    fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> {
+        self.context.new_rvalue_from_double(typ, val)
+    }
+
+    fn const_str(&self, s: &str) -> (RValue<'gcc>, RValue<'gcc>) {
+        let mut const_str_cache = self.const_str_cache.borrow_mut();
+        let str_global = const_str_cache.get(s).copied().unwrap_or_else(|| {
+            let g = self.global_string(s);
+            const_str_cache.insert(s.to_owned(), g);
+            g
+        });
+        let len = s.len();
+        let cs = self.const_ptrcast(
+            str_global.get_address(None),
+            self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self)),
+        );
+        (cs, self.const_usize(len as u64))
+    }
+
+    fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
+        let fields: Vec<_> = values.iter().map(|value| value.get_type()).collect();
+        // TODO(antoyo): cache the type? It's anonymous, so probably not.
+        let typ = self.type_struct(&fields, packed);
+        let struct_type = typ.is_struct().expect("struct type");
+        self.context.new_struct_constructor(None, struct_type.as_type(), None, values)
+    }
+
+    fn const_vector(&self, values: &[RValue<'gcc>]) -> RValue<'gcc> {
+        let typ = self.type_vector(values[0].get_type(), values.len() as u64);
+        self.context.new_rvalue_from_vector(None, typ, values)
+    }
+
+    fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
+        // TODO(antoyo)
+        None
+    }
+
+    fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
+        // TODO(antoyo)
+        None
+    }
+
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
+        let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
+        match cv {
+            Scalar::Int(int) => {
+                let data = int.to_bits(layout.size(self));
+
+                // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
+                // the paths for floating-point values.
+                if ty == self.float_type {
+                    return self
+                        .context
+                        .new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
+                }
+                if ty == self.double_type {
+                    return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
+                }
+
+                let value = self.const_uint_big(self.type_ix(bitsize), data);
+                let bytesize = layout.size(self).bytes();
+                if bitsize > 1 && ty.is_integral() && bytesize as u32 == ty.get_size() {
+                    // NOTE: since the intrinsic _xabort is called with a bitcast, which
+                    // is non-const, but expects a constant, do a normal cast instead of a bitcast.
+                    // FIXME(antoyo): fix bitcast to work in constant contexts.
+                    // TODO(antoyo): perhaps only use bitcast for pointers?
+                    self.context.new_cast(None, value, ty)
+                } else {
+                    // TODO(bjorn3): assert size is correct
+                    self.const_bitcast(value, ty)
+                }
+            }
+            Scalar::Ptr(ptr, _size) => {
+                let (prov, offset) = ptr.into_parts(); // we know the `offset` is relative
+                let alloc_id = prov.alloc_id();
+                let base_addr = match self.tcx.global_alloc(alloc_id) {
+                    GlobalAlloc::Memory(alloc) => {
+                        let init = const_alloc_to_gcc(self, alloc);
+                        let alloc = alloc.inner();
+                        let value = match alloc.mutability {
+                            Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                            _ => self.static_addr_of(init, alloc.align, None),
+                        };
+                        if !self.sess().fewer_names() {
+                            // TODO(antoyo): set value name.
+                        }
+                        value
+                    }
+                    GlobalAlloc::Function { instance, .. } => self.get_fn_addr(instance),
+                    GlobalAlloc::VTable(ty, dyn_ty) => {
+                        let alloc = self
+                            .tcx
+                            .global_alloc(self.tcx.vtable_allocation((
+                                ty,
+                                dyn_ty.principal().map(|principal| {
+                                    self.tcx.instantiate_bound_regions_with_erased(principal)
+                                }),
+                            )))
+                            .unwrap_memory();
+                        let init = const_alloc_to_gcc(self, alloc);
+                        self.static_addr_of(init, alloc.inner().align, None)
+                    }
+                    GlobalAlloc::Static(def_id) => {
+                        assert!(self.tcx.is_static(def_id));
+                        self.get_static(def_id).get_address(None)
+                    }
+                };
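+                // Compute the final address in integer arithmetic: cast the base address to
+                // `usize`, add the byte offset from the pointer's provenance, then cast the sum
+                // back to the original pointer type.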
+                let ptr_type = base_addr.get_type();
+                let base_addr = self.context.new_cast(None, base_addr, self.usize_type);
+                let offset =
+                    self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
+                let ptr = self.context.new_cast(None, base_addr + offset, ptr_type);
+                if !matches!(layout.primitive(), Pointer(_)) {
+                    self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
+                } else {
+                    self.context.new_cast(None, ptr, ty)
+                }
+            }
+        }
+    }
+
+    fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value {
+        const_alloc_to_gcc(self, alloc)
+    }
+
+    fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
+        self.context
+            .new_array_access(None, base_addr, self.const_usize(offset.bytes()))
+            .get_address(None)
+    }
+}
+
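+/// Signedness queries and conversions between a GCC integer type and its
+/// opposite-signedness counterpart; for example, `cx.u32_type.to_signed(cx)` yields
+/// `cx.i32_type`, while types without a known counterpart are returned unchanged.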
+pub trait SignType<'gcc, 'tcx> {
+    fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
+    fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx)
+    }
+
+    fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx)
+    }
+
+    fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        if self.is_u8(cx) {
+            cx.i8_type
+        } else if self.is_u16(cx) {
+            cx.i16_type
+        } else if self.is_u32(cx) {
+            cx.i32_type
+        } else if self.is_u64(cx) {
+            cx.i64_type
+        } else if self.is_u128(cx) {
+            cx.i128_type
+        } else if self.is_uchar(cx) {
+            cx.char_type
+        } else if self.is_ushort(cx) {
+            cx.short_type
+        } else if self.is_uint(cx) {
+            cx.int_type
+        } else if self.is_ulong(cx) {
+            cx.long_type
+        } else if self.is_ulonglong(cx) {
+            cx.longlong_type
+        } else {
+            *self
+        }
+    }
+
+    fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        if self.is_i8(cx) {
+            cx.u8_type
+        } else if self.is_i16(cx) {
+            cx.u16_type
+        } else if self.is_i32(cx) {
+            cx.u32_type
+        } else if self.is_i64(cx) {
+            cx.u64_type
+        } else if self.is_i128(cx) {
+            cx.u128_type
+        } else if self.is_char(cx) {
+            cx.uchar_type
+        } else if self.is_short(cx) {
+            cx.ushort_type
+        } else if self.is_int(cx) {
+            cx.uint_type
+        } else if self.is_long(cx) {
+            cx.ulong_type
+        } else if self.is_longlong(cx) {
+            cx.ulonglong_type
+        } else {
+            *self
+        }
+    }
+}
+
+pub trait TypeReflection<'gcc, 'tcx> {
+    fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+    fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+    fn is_vector(&self) -> bool;
+}
+
+impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
+    fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.uchar_type
+    }
+
+    fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ushort_type
+    }
+
+    fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.uint_type
+    }
+
+    fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ulong_type
+    }
+
+    fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ulonglong_type
+    }
+
+    fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.char_type
+    }
+
+    fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.short_type
+    }
+
+    fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.int_type
+    }
+
+    fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.long_type
+    }
+
+    fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.longlong_type
+    }
+
+    fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.i8_type)
+    }
+
+    fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.u8_type)
+    }
+
+    fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.i16_type)
+    }
+
+    fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.u16_type)
+    }
+
+    fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.i32_type)
+    }
+
+    fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.u32_type)
+    }
+
+    fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.i64_type)
+    }
+
+    fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_compatible_with(cx.u64_type)
+    }
+
+    fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i128_type.unqualified()
+    }
+
+    fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u128_type.unqualified()
+    }
+
+    fn is_vector(&self) -> bool {
+        let mut typ = *self;
+        loop {
+            if typ.dyncast_vector().is_some() {
+                return true;
+            }
+
+            let old_type = typ;
+            typ = typ.unqualified();
+            if old_type == typ {
+                break;
+            }
+        }
+
+        false
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
new file mode 100644
index 00000000000..acb39374628
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -0,0 +1,421 @@
+#[cfg(feature = "master")]
+use gccjit::{FnAttribute, VarAttribute, Visibility};
+use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue, Type};
+use rustc_abi::{self as abi, Align, HasDataLayout, Primitive, Size, WrappingRange};
+use rustc_codegen_ssa::traits::{
+    BaseTypeCodegenMethods, ConstCodegenMethods, StaticCodegenMethods,
+};
+use rustc_hir::def::DefKind;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::interpret::{
+    self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint,
+};
+use rustc_middle::mir::mono::Linkage;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Instance};
+use rustc_middle::{bug, span_bug};
+use rustc_span::def_id::DefId;
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::errors::InvalidMinimumAlignment;
+use crate::type_of::LayoutGccExt;
+
+fn set_global_alignment<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    gv: LValue<'gcc>,
+    mut align: Align,
+) {
+    // The target may require greater alignment for globals than the type does.
+    // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
+    // which can force it to be smaller. Rust doesn't support this yet.
+    if let Some(min) = cx.sess().target.min_global_align {
+        match Align::from_bits(min) {
+            Ok(min) => align = align.max(min),
+            Err(err) => {
+                cx.sess().dcx().emit_err(InvalidMinimumAlignment { err: err.to_string() });
+            }
+        }
+    }
+    gv.set_alignment(align.bytes() as i32);
+}
+
+impl<'gcc, 'tcx> StaticCodegenMethods for CodegenCx<'gcc, 'tcx> {
+    fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+        // TODO(antoyo): implement a proper rvalue comparison in libgccjit instead of doing the
+        // following:
+        for (value, variable) in &*self.const_globals.borrow() {
+            if format!("{:?}", value) == format!("{:?}", cv) {
+                if let Some(global_variable) = self.global_lvalues.borrow().get(variable) {
+                    let alignment = align.bits() as i32;
+                    if alignment > global_variable.get_alignment() {
+                        global_variable.set_alignment(alignment);
+                    }
+                }
+                return *variable;
+            }
+        }
+        let global_value = self.static_addr_of_mut(cv, align, kind);
+        #[cfg(feature = "master")]
+        self.global_lvalues
+            .borrow()
+            .get(&global_value)
+            .expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`")
+            .global_set_readonly();
+        self.const_globals.borrow_mut().insert(cv, global_value);
+        global_value
+    }
+
+    #[cfg_attr(not(feature = "master"), allow(unused_mut))]
+    fn codegen_static(&self, def_id: DefId) {
+        let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+        let Ok((value, alloc)) = codegen_static_initializer(self, def_id) else {
+            // Error has already been reported
+            return;
+        };
+        let alloc = alloc.inner();
+
+        // boolean SSA values are i1, but they have to be stored in i8 slots,
+        // otherwise some LLVM optimization passes don't work as expected
+        let val_llty = self.val_ty(value);
+        if val_llty == self.type_i1() {
+            unimplemented!();
+        };
+
+        let is_thread_local = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+        let global = self.get_static_inner(def_id, val_llty);
+
+        #[cfg(feature = "master")]
+        if global.to_rvalue().get_type() != val_llty {
+            global.to_rvalue().set_type(val_llty);
+        }
+        set_global_alignment(self, global, alloc.align);
+
+        global.global_set_initializer_rvalue(value);
+
+        // As an optimization, all shared statics which do not have interior
+        // mutability are placed into read-only memory.
+        if alloc.mutability.is_not() {
+            #[cfg(feature = "master")]
+            global.global_set_readonly();
+        }
+
+        if is_thread_local {
+            // Do not allow LLVM to change the alignment of a TLS on macOS.
+            //
+            // By default a global's alignment can be freely increased.
+            // This allows LLVM to generate more performant instructions
+            // e.g., using load-aligned into a SIMD register.
+            //
+            // However, on macOS 10.10 or below, the dynamic linker does not
+            // respect any alignment given on the TLS (radar 24221680).
+            // This violates the alignment assumption, causing segfaults at runtime.
+            //
+            // This bug is very easy to trigger. In `println!` and `panic!`,
+            // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+            // which the values would be `mem::replace`d on initialization.
+            // The implementation of `mem::replace` will use SIMD
+            // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+            // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+            // which macOS's dyld disregards, causing crashes
+            // (see issues #51794, #51758, #50867, #48866 and #44056).
+            //
+            // To work around the bug, we trick LLVM into not increasing
+            // the global's alignment by explicitly assigning a section to it
+            // (equivalent to automatically generating a `#[link_section]` attribute).
+            // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+            // of `lib/IR/Globals.cpp` for why this works.
+            //
+            // When the alignment is not increased, the optimized `mem::replace`
+            // will use unaligned load instructions instead, thus avoiding the crash.
+            //
+            // We could remove this hack whenever we decide to drop macOS 10.10 support.
+            if self.tcx.sess.target.options.is_like_darwin {
+                // The `inspect` method is okay here because we checked for provenance, and
+                // because we are doing this access to inspect the final interpreter state
+                // (not as part of the interpreter execution).
+                //
+                // FIXME: This check requires that the (arbitrary) value of undefined bytes
+                // happens to be zero. Instead, we should only check the value of defined bytes
+                // and set all undefined bytes to zero if this allocation is headed for the
+                // BSS.
+                unimplemented!();
+            }
+        }
+
+        // Wasm statics with custom link sections get special treatment as they
+        // go into custom sections of the wasm executable.
+        if self.tcx.sess.target.is_like_wasm {
+            if let Some(_section) = attrs.link_section {
+                unimplemented!();
+            }
+        } else {
+            // TODO(antoyo): set link section.
+        }
+
+        if attrs.flags.contains(CodegenFnAttrFlags::USED)
+            || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)
+        {
+            self.add_used_global(global.to_rvalue());
+        }
+    }
+
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+    fn add_used_global(&self, _global: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn add_compiler_used_global(&self, global: RValue<'gcc>) {
+        // NOTE: GCC does not seem to distinguish between `compiler.used` and `used`.
+        self.add_used_global(global);
+    }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))]
+    pub fn add_used_function(&self, function: Function<'gcc>) {
+        #[cfg(feature = "master")]
+        function.add_attribute(FnAttribute::Used);
+    }
+
+    pub fn static_addr_of_mut(
+        &self,
+        cv: RValue<'gcc>,
+        align: Align,
+        kind: Option<&str>,
+    ) -> RValue<'gcc> {
+        let global = match kind {
+            Some(kind) if !self.tcx.sess.fewer_names() => {
+                let name = self.generate_local_symbol_name(kind);
+                // TODO(antoyo): check if it's okay that no link_section is set.
+
+                let typ = self.val_ty(cv).get_aligned(align.bytes());
+                let global = self.declare_private_global(&name[..], typ);
+                global
+            }
+            _ => {
+                let typ = self.val_ty(cv).get_aligned(align.bytes());
+                let global = self.declare_unnamed_global(typ);
+                global
+            }
+        };
+        global.global_set_initializer_rvalue(cv);
+        // TODO(antoyo): set unnamed address.
+        let rvalue = global.get_address(None);
+        self.global_lvalues.borrow_mut().insert(rvalue, global);
+        rvalue
+    }
+
+    pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> {
+        let instance = Instance::mono(self.tcx, def_id);
+        let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
+        // Nested statics do not have a type, so pick a random type and let `define_static` figure out
+        // the gcc type from the actual evaluated initializer.
+        let gcc_type = if nested {
+            self.type_i8()
+        } else {
+            let ty = instance.ty(self.tcx, ty::TypingEnv::fully_monomorphized());
+            self.layout_of(ty).gcc_type(self)
+        };
+
+        self.get_static_inner(def_id, gcc_type)
+    }
+
+    pub(crate) fn get_static_inner(&self, def_id: DefId, gcc_type: Type<'gcc>) -> LValue<'gcc> {
+        let instance = Instance::mono(self.tcx, def_id);
+        if let Some(&global) = self.instances.borrow().get(&instance) {
+            trace!("used cached value");
+            return global;
+        }
+
+        // FIXME: Once we stop removing globals in `codegen_static`, we can uncomment this code.
+        // let defined_in_current_codegen_unit =
+        //     self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+        // assert!(
+        //     !defined_in_current_codegen_unit,
+        //     "consts::get_static() should always hit the cache for \
+        //          statics defined in the same CGU, but did not for `{:?}`",
+        //     def_id
+        // );
+        let sym = self.tcx.symbol_name(instance).name;
+        let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+
+        let global = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
+            if let Some(global) = self.get_declared_value(sym) {
+                if self.val_ty(global) != self.type_ptr_to(gcc_type) {
+                    span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
+                }
+            }
+
+            let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+            let global = self.declare_global(
+                sym,
+                gcc_type,
+                GlobalKind::Imported,
+                is_tls,
+                fn_attrs.link_section,
+            );
+
+            if !self.tcx.is_reachable_non_generic(def_id) {
+                #[cfg(feature = "master")]
+                global.add_attribute(VarAttribute::Visibility(Visibility::Hidden));
+            }
+
+            global
+        } else {
+            check_and_apply_linkage(self, fn_attrs, gcc_type, sym)
+        };
+
+        if !def_id.is_local() {
+            let needs_dll_storage_attr = false; // TODO(antoyo)
+
+            // If this assertion triggers, there's something wrong with commandline
+            // argument validation.
+            debug_assert!(
+                !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+                    && self.tcx.sess.target.options.is_like_msvc
+                    && self.tcx.sess.opts.cg.prefer_dynamic)
+            );
+
+            if needs_dll_storage_attr {
+                // This item is external but not foreign, i.e., it originates from an external Rust
+                // crate. Since we don't know whether this crate will be linked dynamically or
+                // statically in the final application, we always mark such symbols as 'dllimport'.
+                // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+                // to make things work.
+                //
+                // However, in some scenarios we defer emission of statics to downstream
+                // crates, so there are cases where a static with an upstream DefId
+                // is actually present in the current crate. We can find out via the
+                // is_codegened_item query.
+                if !self.tcx.is_codegened_item(def_id) {
+                    unimplemented!();
+                }
+            }
+        }
+
+        // TODO(antoyo): set dll storage class.
+
+        self.instances.borrow_mut().insert(instance, global);
+        global
+    }
+}
+
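+/// Translates a constant allocation into a packed GCC struct by walking its provenance
+/// entries in order: the plain bytes between pointers become `u8` array fields and each
+/// pointer becomes a relocated address field. For example, four plain bytes followed by a
+/// pointer yield a two-field struct roughly like `{ [4 x u8], ptr }`.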
+pub fn const_alloc_to_gcc<'gcc>(
+    cx: &CodegenCx<'gcc, '_>,
+    alloc: ConstAllocation<'_>,
+) -> RValue<'gcc> {
+    let alloc = alloc.inner();
+    let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
+    let dl = cx.data_layout();
+    let pointer_size = dl.pointer_size.bytes() as usize;
+
+    let mut next_offset = 0;
+    for &(offset, prov) in alloc.provenance().ptrs().iter() {
+        let alloc_id = prov.alloc_id();
+        let offset = offset.bytes();
+        assert_eq!(offset as usize as u64, offset);
+        let offset = offset as usize;
+        if offset > next_offset {
+            // This `inspect` is okay since we have checked that it is not within a pointer with
+            // provenance, that it is within the bounds of the allocation, and that it doesn't
+            // affect interpreter execution (we inspect the result after interpreter execution).
+            // Any undef byte is replaced with some arbitrary byte value.
+            //
+            // FIXME: relay undef bytes to codegen as undef const bytes
+            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+            llvals.push(cx.const_bytes(bytes));
+        }
+        let ptr_offset = read_target_uint(
+            dl.endian,
+            // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+            // affect interpreter execution (we inspect the result after interpreter execution),
+            // and we properly interpret the provenance as a relocation pointer offset.
+            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+        )
+        .expect("const_alloc_to_llvm: could not read relocation pointer")
+            as u64;
+
+        let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);
+
+        llvals.push(cx.scalar_to_backend(
+            InterpScalar::from_pointer(
+                interpret::Pointer::new(prov, Size::from_bytes(ptr_offset)),
+                &cx.tcx,
+            ),
+            abi::Scalar::Initialized {
+                value: Primitive::Pointer(address_space),
+                valid_range: WrappingRange::full(dl.pointer_size),
+            },
+            cx.type_i8p_ext(address_space),
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.len() >= next_offset {
+        let range = next_offset..alloc.len();
+        // This `inspect` is okay since we have checked that it is after all provenance, it is
+        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+        // inspect the result after interpreter execution). Any undef byte is replaced with some
+        // arbitrary byte value.
+        //
+        // FIXME: relay undef bytes to codegen as undef const bytes
+        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+        llvals.push(cx.const_bytes(bytes));
+    }
+
+    // FIXME(bjorn3) avoid wrapping in a struct when there is only a single element.
+    cx.const_struct(&llvals, true)
+}
+
+fn codegen_static_initializer<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    def_id: DefId,
+) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
+    let alloc = cx.tcx.eval_static_initializer(def_id)?;
+    Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    attrs: &CodegenFnAttrs,
+    gcc_type: Type<'gcc>,
+    sym: &str,
+) -> LValue<'gcc> {
+    let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+    if let Some(linkage) = attrs.import_linkage {
+        // Declare a symbol `foo` with the desired linkage.
+        let global1 =
+            cx.declare_global_with_linkage(sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
+
+        if linkage == Linkage::ExternalWeak {
+            #[cfg(feature = "master")]
+            global1.add_attribute(VarAttribute::Weak);
+        }
+
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`.  If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+        let mut real_name = "_rust_extern_with_linkage_".to_string();
+        real_name.push_str(sym);
+        let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
+        // TODO(antoyo): set linkage.
+        let value = cx.const_ptrcast(global1.get_address(None), gcc_type);
+        global2.global_set_initializer_rvalue(value);
+        global2
+    } else {
+        // Generate an external declaration.
+        // FIXME(nagisa): investigate whether it can be changed into define_global
+
+        // Thread-local statics in some other crate need to *always* be linked
+        // against in a thread-local fashion, so we need to be sure to apply the
+        // thread-local attribute locally if it was present remotely. If we
+        // don't do this then linker errors can be generated where the linker
+        // complains that one object file has a thread local version of the
+        // symbol and another one doesn't.
+        cx.declare_global(sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
new file mode 100644
index 00000000000..73718994e64
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -0,0 +1,604 @@
+use std::cell::{Cell, RefCell};
+
+use gccjit::{
+    Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, Location, RValue, Type,
+};
+use rustc_abi::{HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::errors as ssa_errors;
+use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, MiscCodegenMethods};
+use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError,
+    LayoutOfHelpers,
+};
+use rustc_middle::ty::{self, ExistentialTraitRef, Instance, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::source_map::respan;
+use rustc_span::{DUMMY_SP, Span};
+use rustc_target::spec::{
+    HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, Target, TlsModel, WasmCAbi, X86Abi,
+};
+
+#[cfg(feature = "master")]
+use crate::abi::conv_to_fn_attribute;
+use crate::callee::get_fn;
+use crate::common::SignType;
+
+#[cfg_attr(not(feature = "master"), allow(dead_code))]
+pub struct CodegenCx<'gcc, 'tcx> {
+    pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+    pub context: &'gcc Context<'gcc>,
+
+    // TODO(bjorn3): Can this field be removed?
+    pub current_func: RefCell<Option<Function<'gcc>>>,
+    pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+    pub function_address_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
+
+    pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+    pub intrinsics: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+    pub tls_model: gccjit::TlsModel,
+
+    pub bool_type: Type<'gcc>,
+    pub i8_type: Type<'gcc>,
+    pub i16_type: Type<'gcc>,
+    pub i32_type: Type<'gcc>,
+    pub i64_type: Type<'gcc>,
+    pub i128_type: Type<'gcc>,
+    pub isize_type: Type<'gcc>,
+
+    pub u8_type: Type<'gcc>,
+    pub u16_type: Type<'gcc>,
+    pub u32_type: Type<'gcc>,
+    pub u64_type: Type<'gcc>,
+    pub u128_type: Type<'gcc>,
+    pub usize_type: Type<'gcc>,
+
+    pub char_type: Type<'gcc>,
+    pub uchar_type: Type<'gcc>,
+    pub short_type: Type<'gcc>,
+    pub ushort_type: Type<'gcc>,
+    pub int_type: Type<'gcc>,
+    pub uint_type: Type<'gcc>,
+    pub long_type: Type<'gcc>,
+    pub ulong_type: Type<'gcc>,
+    pub longlong_type: Type<'gcc>,
+    pub ulonglong_type: Type<'gcc>,
+    pub sizet_type: Type<'gcc>,
+
+    pub supports_128bit_integers: bool,
+    pub supports_f16_type: bool,
+    pub supports_f32_type: bool,
+    pub supports_f64_type: bool,
+    pub supports_f128_type: bool,
+
+    pub float_type: Type<'gcc>,
+    pub double_type: Type<'gcc>,
+
+    pub linkage: Cell<FunctionType>,
+    pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+    pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+    pub tcx: TyCtxt<'tcx>,
+
+    pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+    /// Cache instances of monomorphic and polymorphic items
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, LValue<'gcc>>>,
+    /// Cache function instances of monomorphic and polymorphic items
+    pub function_instances: RefCell<FxHashMap<Instance<'tcx>, Function<'gcc>>>,
+    /// Cache generated vtables
+    pub vtables:
+        RefCell<FxHashMap<(Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+
+    // TODO(antoyo): improve the SSA API to not require those.
+    /// Mapping from function pointer type to indexes of on stack parameters.
+    pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
+    /// Mapping from function to indexes of on stack parameters.
+    pub on_stack_function_params: RefCell<FxHashMap<Function<'gcc>, FxHashSet<usize>>>,
+
+    /// Cache of emitted const globals (value -> global)
+    pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+    /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
+    /// TODO(antoyo): remove when the rustc API is fixed.
+    pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
+
+    /// Cache of constant strings.
+    pub const_str_cache: RefCell<FxHashMap<String, LValue<'gcc>>>,
+
+    /// Cache of globals.
+    pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
+
+    /// A counter that is used for generating local symbol names
+    local_gen_sym_counter: Cell<usize>,
+
+    eh_personality: Cell<Option<RValue<'gcc>>>,
+    #[cfg(feature = "master")]
+    pub rust_try_fn: Cell<Option<(Type<'gcc>, Function<'gcc>)>>,
+
+    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+    /// NOTE: a hack is used because the rustc API is not suitable for libgccjit; as such,
+    /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+    /// As such, this set remembers which of these pointers were returned by this function so that
+    /// they can be dereferenced later.
+    /// FIXME(antoyo): fix the rustc API to avoid having this hack.
+    pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+
+    #[cfg(feature = "master")]
+    pub cleanup_blocks: RefCell<FxHashSet<Block<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        context: &'gcc Context<'gcc>,
+        codegen_unit: &'tcx CodegenUnit<'tcx>,
+        tcx: TyCtxt<'tcx>,
+        supports_128bit_integers: bool,
+        supports_f16_type: bool,
+        supports_f32_type: bool,
+        supports_f64_type: bool,
+        supports_f128_type: bool,
+    ) -> Self {
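+        // Create the GCC type corresponding to a C type, aligned according to the Rust
+        // layout of the matching Rust type.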
+        let create_type = |ctype, rust_type| {
+            let layout = tcx
+                .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(rust_type))
+                .unwrap();
+            let align = layout.align.abi.bytes();
+            #[cfg(feature = "master")]
+            {
+                context.new_c_type(ctype).get_aligned(align)
+            }
+            #[cfg(not(feature = "master"))]
+            {
+                // libgccjit 12 doesn't contain the fix needed to compare aligned integer types,
+                // so only align u128 and i128.
+                if layout.ty.int_size_and_signed(tcx).0.bytes() == 16 {
+                    context.new_c_type(ctype).get_aligned(align)
+                } else {
+                    context.new_c_type(ctype)
+                }
+            }
+        };
+
+        let i8_type = create_type(CType::Int8t, tcx.types.i8);
+        let i16_type = create_type(CType::Int16t, tcx.types.i16);
+        let i32_type = create_type(CType::Int32t, tcx.types.i32);
+        let i64_type = create_type(CType::Int64t, tcx.types.i64);
+        let u8_type = create_type(CType::UInt8t, tcx.types.u8);
+        let u16_type = create_type(CType::UInt16t, tcx.types.u16);
+        let u32_type = create_type(CType::UInt32t, tcx.types.u32);
+        let u64_type = create_type(CType::UInt64t, tcx.types.u64);
+
+        let (i128_type, u128_type) = if supports_128bit_integers {
+            let i128_type = create_type(CType::Int128t, tcx.types.i128);
+            let u128_type = create_type(CType::UInt128t, tcx.types.u128);
+            (i128_type, u128_type)
+        } else {
+            /*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
+            let i128_align = layout.align.abi.bytes();
+            let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
+            let u128_align = layout.align.abi.bytes();*/
+
+            // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
+            // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
+            let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
+            let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
+            (i128_type, u128_type)
+        };
+
+        let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+        // TODO(antoyo): set alignment on those types as well.
+        let float_type = context.new_type::<f32>();
+        let double_type = context.new_type::<f64>();
+
+        let char_type = context.new_c_type(CType::Char);
+        let uchar_type = context.new_c_type(CType::UChar);
+        let short_type = context.new_c_type(CType::Short);
+        let ushort_type = context.new_c_type(CType::UShort);
+        let int_type = context.new_c_type(CType::Int);
+        let uint_type = context.new_c_type(CType::UInt);
+        let long_type = context.new_c_type(CType::Long);
+        let ulong_type = context.new_c_type(CType::ULong);
+        let longlong_type = context.new_c_type(CType::LongLong);
+        let ulonglong_type = context.new_c_type(CType::ULongLong);
+        let sizet_type = context.new_c_type(CType::SizeT);
+
+        let usize_type = sizet_type;
+        let isize_type = usize_type;
+        let bool_type = context.new_type::<bool>();
+
+        let mut functions = FxHashMap::default();
+        let builtins = ["abort"];
+
+        for builtin in builtins.iter() {
+            functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
+        }
+
+        let mut cx = Self {
+            codegen_unit,
+            context,
+            current_func: RefCell::new(None),
+            normal_function_addresses: Default::default(),
+            function_address_names: Default::default(),
+            functions: RefCell::new(functions),
+            intrinsics: RefCell::new(FxHashMap::default()),
+
+            tls_model,
+
+            bool_type,
+            i8_type,
+            i16_type,
+            i32_type,
+            i64_type,
+            i128_type,
+            isize_type,
+            usize_type,
+            u8_type,
+            u16_type,
+            u32_type,
+            u64_type,
+            u128_type,
+            char_type,
+            uchar_type,
+            short_type,
+            ushort_type,
+            int_type,
+            uint_type,
+            long_type,
+            ulong_type,
+            longlong_type,
+            ulonglong_type,
+            sizet_type,
+
+            supports_128bit_integers,
+            supports_f16_type,
+            supports_f32_type,
+            supports_f64_type,
+            supports_f128_type,
+
+            float_type,
+            double_type,
+
+            linkage: Cell::new(FunctionType::Internal),
+            instances: Default::default(),
+            function_instances: Default::default(),
+            on_stack_params: Default::default(),
+            on_stack_function_params: Default::default(),
+            vtables: Default::default(),
+            const_globals: Default::default(),
+            global_lvalues: Default::default(),
+            const_str_cache: Default::default(),
+            globals: Default::default(),
+            scalar_types: Default::default(),
+            types: Default::default(),
+            tcx,
+            struct_types: Default::default(),
+            local_gen_sym_counter: Cell::new(0),
+            eh_personality: Cell::new(None),
+            #[cfg(feature = "master")]
+            rust_try_fn: Cell::new(None),
+            pointee_infos: Default::default(),
+            structs_as_pointer: Default::default(),
+            #[cfg(feature = "master")]
+            cleanup_blocks: Default::default(),
+        };
+        // TODO(antoyo): instead of doing this, add SsizeT to libgccjit.
+        cx.isize_type = usize_type.to_signed(&cx);
+        cx
+    }
+
+    pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
+        let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
+        debug_assert!(
+            self.functions.borrow().values().any(|value| *value == function),
+            "{:?} ({:?}) is not a function",
+            value,
+            value.get_type()
+        );
+        function
+    }
+
+    pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool {
+        let types = [
+            self.u8_type,
+            self.u16_type,
+            self.u32_type,
+            self.u64_type,
+            self.i8_type,
+            self.i16_type,
+            self.i32_type,
+            self.i64_type,
+        ];
+
+        for native_type in types {
+            if native_type.is_compatible_with(typ) {
+                return true;
+            }
+        }
+
+        self.supports_128bit_integers
+            && (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+    }
+
+    pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
+        !self.supports_128bit_integers
+            && (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+    }
+
+    pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+        self.is_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
+    }
+
+    pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+        self.is_native_int_type(typ)
+            || self.is_non_native_int_type(typ)
+            || typ.is_compatible_with(self.bool_type)
+    }
+
+    pub fn sess(&self) -> &'tcx Session {
+        self.tcx.sess
+    }
+
+    pub fn bitcast_if_needed(
+        &self,
+        value: RValue<'gcc>,
+        expected_type: Type<'gcc>,
+    ) -> RValue<'gcc> {
+        if value.get_type() != expected_type {
+            self.context.new_bitcast(None, value, expected_type)
+        } else {
+            value
+        }
+    }
+}
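+
+// NOTE: a minimal usage sketch (illustrative only, not part of this crate), assuming
+// hypothetical `cx: &CodegenCx<'gcc, 'tcx>` and `value: RValue<'gcc>` bindings:
+/*
+// No-op when the type already matches; otherwise a bitcast is emitted.
+let value = cx.bitcast_if_needed(value, cx.u64_type);
+// On targets without native 128-bit integers, u128 is emulated as an array of two u64
+// and must go through the fallback paths in int.rs.
+let emulated = cx.is_non_native_int_type(cx.u128_type);
+*/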
+
+impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
+    type Value = RValue<'gcc>;
+    type Metadata = RValue<'gcc>;
+    // TODO(antoyo): change to Function<'gcc>.
+    type Function = RValue<'gcc>;
+
+    type BasicBlock = Block<'gcc>;
+    type Type = Type<'gcc>;
+    type Funclet = (); // TODO(antoyo)
+
+    type DIScope = (); // TODO(antoyo)
+    type DILocation = Location<'gcc>;
+    type DIVariable = (); // TODO(antoyo)
+}
+
+impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn vtables(
+        &self,
+    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+        &self.vtables
+    }
+
+    fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+        let func = get_fn(self, instance);
+        *self.current_func.borrow_mut() = Some(func);
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+        unsafe { std::mem::transmute(func) }
+    }
+
+    fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+        let func_name = self.tcx.symbol_name(instance).name;
+
+        let func = if self.intrinsics.borrow().contains_key(func_name) {
+            self.intrinsics.borrow()[func_name]
+        } else if let Some(variable) = self.get_declared_value(func_name) {
+            return variable;
+        } else {
+            get_fn(self, instance)
+        };
+        let ptr = func.get_address(None);
+
+        // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
+        // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
+
+        self.normal_function_addresses.borrow_mut().insert(ptr);
+        self.function_address_names.borrow_mut().insert(ptr, func_name.to_string());
+
+        ptr
+    }
+
+    fn eh_personality(&self) -> RValue<'gcc> {
+        // The exception handling personality function.
+        //
+        // If our compilation unit has the `eh_personality` lang item somewhere
+        // within it, then we just need to codegen that. Otherwise, we're
+        // building an rlib which will depend on some upstream implementation of
+        // this function, so we just codegen a generic reference to it. We don't
+        // specify any of the types for the function, we just make it a symbol
+        // that LLVM can later use.
+        //
+        // Note that MSVC is a little special here in that we don't use the
+        // `eh_personality` lang item at all. Currently LLVM has support for
+        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+        // *name of the personality function* to decide what kind of unwind side
+        // tables/landing pads to emit. It looks like Dwarf is used by default,
+        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+        // an "exception", but for MSVC we want to force SEH. This means that we
+        // can't actually have the personality function be our standard
+        // `rust_eh_personality` function, but rather we wired it up to the
+        // CRT's custom personality function, which forces LLVM to consider
+        // landing pads as "landing pads for SEH".
+        if let Some(llpersonality) = self.eh_personality.get() {
+            return llpersonality;
+        }
+        let tcx = self.tcx;
+        let func = match tcx.lang_items().eh_personality() {
+            Some(def_id) if !wants_msvc_seh(self.sess()) => {
+                let instance = ty::Instance::expect_resolve(
+                    tcx,
+                    self.typing_env(),
+                    def_id,
+                    ty::List::empty(),
+                    DUMMY_SP,
+                );
+
+                let symbol_name = tcx.symbol_name(instance).name;
+                let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+                self.linkage.set(FunctionType::Extern);
+                let func = self.declare_fn(symbol_name, fn_abi);
+                let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+                func
+            }
+            _ => {
+                let name = if wants_msvc_seh(self.sess()) {
+                    "__CxxFrameHandler3"
+                } else {
+                    "rust_eh_personality"
+                };
+                let func = self.declare_func(name, self.type_i32(), &[], true);
+                unsafe { std::mem::transmute::<Function<'gcc>, RValue<'gcc>>(func) }
+            }
+        };
+        // TODO(antoyo): apply target cpu attributes.
+        self.eh_personality.set(Some(func));
+        func
+    }
+
+    fn sess(&self) -> &Session {
+        self.tcx.sess
+    }
+
+    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+        self.codegen_unit
+    }
+
+    fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+        let entry_name = self.sess().target.entry_name.as_ref();
+        if !self.functions.borrow().contains_key(entry_name) {
+            #[cfg(feature = "master")]
+            let conv = conv_to_fn_attribute(self.sess().target.entry_abi, &self.sess().target.arch);
+            #[cfg(not(feature = "master"))]
+            let conv = None;
+            Some(self.declare_entry_fn(entry_name, fn_type, conv))
+        } else {
+            // If the symbol already exists, it is an error: for example, the user wrote
+            // #[no_mangle] extern "C" fn main(..) {..}
+            None
+        }
+    }
+}
+
+impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target
+    }
+}
+
+impl<'gcc, 'tcx> HasWasmCAbiOpt for CodegenCx<'gcc, 'tcx> {
+    fn wasm_c_abi_opt(&self) -> WasmCAbi {
+        self.tcx.sess.opts.unstable_opts.wasm_c_abi
+    }
+}
+
+impl<'gcc, 'tcx> HasX86AbiOpt for CodegenCx<'gcc, 'tcx> {
+    fn x86_abi_opt(&self) -> X86Abi {
+        X86Abi {
+            regparm: self.tcx.sess.opts.unstable_opts.regparm,
+            reg_struct_return: self.tcx.sess.opts.unstable_opts.reg_struct_return,
+        }
+    }
+}
+
+impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
+            self.tcx.dcx().emit_fatal(respan(span, err.into_diagnostic()))
+        } else {
+            self.tcx.dcx().emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
+        }
+    }
+}
+
+impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+            self.tcx.dcx().emit_fatal(respan(span, err))
+        } else {
+            match fn_abi_request {
+                FnAbiRequest::OfFnPtr { sig, extra_args } => {
+                    span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}");
+                }
+                FnAbiRequest::OfInstance { instance, extra_args } => {
+                    span_bug!(
+                        span,
+                        "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}"
+                    );
+                }
+            }
+        }
+    }
+}
+
+impl<'tcx, 'gcc> HasTypingEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
+        ty::TypingEnv::fully_monomorphized()
+    }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+    /// Generates a new symbol name with the given prefix. This symbol name must
+    /// only be used for definitions with `internal` or `private` linkage.
+    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+        let idx = self.local_gen_sym_counter.get();
+        self.local_gen_sym_counter.set(idx + 1);
+        // Include a '.' character, so there can be no accidental conflicts with
+        // user-defined names.
+        let mut name = String::with_capacity(prefix.len() + 6);
+        name.push_str(prefix);
+        name.push('.');
+        // Offset the index by the base so that at least two characters are always
+        // generated. This avoids cases where the suffix would be interpreted as a
+        // size suffix by the assembler (for m68k: .b, .w, .l).
+        name.push_str(&(idx as u64 + ALPHANUMERIC_ONLY as u64).to_base(ALPHANUMERIC_ONLY));
+        name
+    }
+}
+
+fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
+    match tls_model {
+        TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
+        TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
+        TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
+        TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
+        TlsModel::Emulated => gccjit::TlsModel::GlobalDynamic,
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/coverageinfo.rs b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
new file mode 100644
index 00000000000..4e44f78f23c
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
@@ -0,0 +1,11 @@
+use rustc_codegen_ssa::traits::CoverageInfoBuilderMethods;
+use rustc_middle::mir::coverage::CoverageKind;
+use rustc_middle::ty::Instance;
+
+use crate::builder::Builder;
+
+impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn add_coverage(&mut self, _instance: Instance<'tcx>, _kind: &CoverageKind) {
+        // TODO(antoyo)
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
new file mode 100644
index 00000000000..55e01687400
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -0,0 +1,319 @@
+use std::ops::Range;
+use std::sync::Arc;
+
+use gccjit::{Location, RValue};
+use rustc_abi::Size;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoCodegenMethods};
+use rustc_index::bit_set::DenseBitSet;
+use rustc_index::{Idx, IndexVec};
+use rustc_middle::mir::{self, Body, SourceScope};
+use rustc_middle::ty::{ExistentialTraitRef, Instance, Ty};
+use rustc_session::config::DebugInfo;
+use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span, Symbol};
+use rustc_target::callconv::FnAbi;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+pub(super) const UNKNOWN_LINE_NUMBER: u32 = 0;
+pub(super) const UNKNOWN_COLUMN_NUMBER: u32 = 0;
+
+impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn dbg_var_addr(
+        &mut self,
+        _dbg_var: Self::DIVariable,
+        _dbg_loc: Self::DILocation,
+        _variable_alloca: Self::Value,
+        _direct_offset: Size,
+        _indirect_offsets: &[Size],
+        _fragment: Option<Range<Size>>,
+    ) {
+        // FIXME(tempdragon): not sure if this is correct; probably wrong, but keep it here for now.
+        #[cfg(feature = "master")]
+        _variable_alloca.set_location(_dbg_loc);
+    }
+
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+        // TODO(antoyo): insert reference to gdb debug scripts section global.
+    }
+
+    /// FIXME(tempdragon): this function is not yet implemented. It seems that the
+    /// debug name and the mangled name should both be included in the LValues.
+    /// In addition, a function to get the rvalue type (m_is_lvalue) should also be included.
+    fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {}
+
+    fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation) {
+        self.location = Some(dbg_loc);
+    }
+
+    fn clear_dbg_loc(&mut self) {
+        self.location = None;
+    }
+
+    fn get_dbg_loc(&self) -> Option<Self::DILocation> {
+        self.location
+    }
+}
+
+/// Generate the `debug_context` for a MIR `Body`.
+/// # Source of Origin
+/// Copied from `create_scope_map.rs` of rustc_codegen_llvm.
+fn compute_mir_scopes<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
+) {
+    // Find all scopes with variables defined in them.
+    let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
+        let mut vars = DenseBitSet::new_empty(mir.source_scopes.len());
+        // FIXME(eddyb) take into account that arguments always have debuginfo,
+        // irrespective of their name (assuming full debuginfo is enabled).
+        // NOTE(eddyb) actually, on second thought, those are always in the
+        // function scope, which always exists.
+        for var_debug_info in &mir.var_debug_info {
+            vars.insert(var_debug_info.source_info.scope);
+        }
+        Some(vars)
+    } else {
+        // Nothing to emit, of course.
+        None
+    };
+    let mut instantiated = DenseBitSet::new_empty(mir.source_scopes.len());
+    // Instantiate all scopes.
+    for idx in 0..mir.source_scopes.len() {
+        let scope = SourceScope::new(idx);
+        make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
+    }
+    assert!(instantiated.count() == mir.source_scopes.len());
+}
+
+/// Update the `debug_context`, adding a new scope to it
+/// if it has not been added yet, as recorded in `instantiated`.
+///
+/// # Source of Origin
+/// Copied from `create_scope_map.rs` of rustc_codegen_llvm.
+/// FIXME(tempdragon/?): add scope support here.
+fn make_mir_scope<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    _instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    variables: &Option<DenseBitSet<SourceScope>>,
+    debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
+    instantiated: &mut DenseBitSet<SourceScope>,
+    scope: SourceScope,
+) {
+    if instantiated.contains(scope) {
+        return;
+    }
+
+    let scope_data = &mir.source_scopes[scope];
+    let parent_scope = if let Some(parent) = scope_data.parent_scope {
+        make_mir_scope(cx, _instance, mir, variables, debug_context, instantiated, parent);
+        debug_context.scopes[parent]
+    } else {
+        // The root is the function itself.
+        let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
+        debug_context.scopes[scope] = DebugScope {
+            file_start_pos: file.start_pos,
+            file_end_pos: file.end_position(),
+            ..debug_context.scopes[scope]
+        };
+        instantiated.insert(scope);
+        return;
+    };
+
+    if let Some(ref vars) = *variables {
+        if !vars.contains(scope) && scope_data.inlined.is_none() {
+            // Do not create a DIScope if there are no variables defined in this
+            // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
+            debug_context.scopes[scope] = parent_scope;
+            instantiated.insert(scope);
+            return;
+        }
+    }
+
+    let loc = cx.lookup_debug_loc(scope_data.span.lo());
+
+    // FIXME(tempdragon): Add the scope related code here if the scope is supported.
+    let dbg_scope = ();
+
+    let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+        // FIXME(eddyb) this doesn't account for the macro-related
+        // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+
+        // TODO(tempdragon): add scope support and then revert to the cg_llvm version of this closure.
+        // NOTE: these variables are passed `()` here,
+        // changed to comply with clippy.
+
+        /* let callsite_scope =  */
+        parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+        cx.dbg_loc(/* callsite_scope */ (), parent_scope.inlined_at, callsite_span)
+    });
+    let p_inlined_at = parent_scope.inlined_at;
+    // TODO(tempdragon): dbg_scope: Add support for scope extension here.
+    inlined_at.or(p_inlined_at);
+
+    debug_context.scopes[scope] = DebugScope {
+        dbg_scope,
+        inlined_at,
+        file_start_pos: loc.file.start_pos,
+        file_end_pos: loc.file.end_position(),
+    };
+    instantiated.insert(scope);
+}
+
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+    /// Information about the original source file.
+    pub file: Arc<SourceFile>,
+    /// The (1-based) line number.
+    pub line: u32,
+    /// The (1-based) column number.
+    pub col: u32,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    /// Looks up debug source information about a `BytePos`.
+    // FIXME(eddyb) rename this to better indicate it's a duplicate of
+    // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+    // `lookup_char_pos` return the right information instead.
+    // Source of Origin: cg_llvm
+    pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+        let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+            Ok(SourceFileAndLine { sf: file, line }) => {
+                let line_pos = file.lines()[line];
+
+                // Use 1-based indexing.
+                let line = (line + 1) as u32;
+                let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
+
+                (file, line, col)
+            }
+            Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
+        };
+
+        // For MSVC, omit the column number.
+        // Otherwise, emit it. This mimics clang behaviour.
+        // See discussion in https://github.com/rust-lang/rust/issues/42921
+        if self.sess().target.is_like_msvc {
+            DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
+        } else {
+            DebugLoc { file, line, col }
+        }
+    }
+}
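+
+// NOTE: illustrative example (not in the original source): for a `BytePos` pointing at the
+// first character of the third line of a file, `lookup_debug_loc` yields line == 3 and
+// col == 1; on MSVC-like targets the column is replaced by UNKNOWN_COLUMN_NUMBER (0).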
+
+impl<'gcc, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn create_vtable_debuginfo(
+        &self,
+        _ty: Ty<'tcx>,
+        _trait_ref: Option<ExistentialTraitRef<'tcx>>,
+        _vtable: Self::Value,
+    ) {
+        // TODO(antoyo)
+    }
+
+    fn create_function_debug_context(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        llfn: RValue<'gcc>,
+        mir: &mir::Body<'tcx>,
+    ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>> {
+        if self.sess().opts.debuginfo == DebugInfo::None {
+            return None;
+        }
+
+        // Initialize fn debug context (including scopes).
+        let empty_scope = DebugScope {
+            dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+            inlined_at: None,
+            file_start_pos: BytePos(0),
+            file_end_pos: BytePos(0),
+        };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(empty_scope, mir.source_scopes.as_slice()),
+            inlined_function_scopes: Default::default(),
+        };
+
+        // Fill in all the scopes, with the information from the MIR body.
+        compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
+
+        Some(fn_debug_context)
+    }
+
+    fn extend_scope_to_file(
+        &self,
+        _scope_metadata: Self::DIScope,
+        _file: &SourceFile,
+    ) -> Self::DIScope {
+        // TODO(antoyo): implement.
+    }
+
+    fn debuginfo_finalize(&self) {
+        self.context.set_debug_info(true)
+    }
+
+    fn create_dbg_var(
+        &self,
+        _variable_name: Symbol,
+        _variable_type: Ty<'tcx>,
+        _scope_metadata: Self::DIScope,
+        _variable_kind: VariableKind,
+        _span: Span,
+    ) -> Self::DIVariable {
+    }
+
+    fn dbg_scope_fn(
+        &self,
+        _instance: Instance<'tcx>,
+        _fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        _maybe_definition_llfn: Option<RValue<'gcc>>,
+    ) -> Self::DIScope {
+        // TODO(antoyo): implement.
+    }
+
+    fn dbg_loc(
+        &self,
+        _scope: Self::DIScope,
+        _inlined_at: Option<Self::DILocation>,
+        span: Span,
+    ) -> Self::DILocation {
+        let pos = span.lo();
+        let DebugLoc { file, line, col } = self.lookup_debug_loc(pos);
+        let loc = match file.name {
+            rustc_span::FileName::Real(ref name) => match *name {
+                rustc_span::RealFileName::LocalPath(ref name) => {
+                    if let Some(name) = name.to_str() {
+                        self.context.new_location(name, line as i32, col as i32)
+                    } else {
+                        Location::null()
+                    }
+                }
+                rustc_span::RealFileName::Remapped {
+                    ref local_path,
+                    virtual_name: ref _unused,
+                } => {
+                    if let Some(name) = local_path.as_ref() {
+                        if let Some(name) = name.to_str() {
+                            self.context.new_location(name, line as i32, col as i32)
+                        } else {
+                            Location::null()
+                        }
+                    } else {
+                        Location::null()
+                    }
+                }
+            },
+            _ => Location::null(),
+        };
+        loc
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
new file mode 100644
index 00000000000..c1ca3eb849e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -0,0 +1,281 @@
+#[cfg(feature = "master")]
+use gccjit::{FnAttribute, ToRValue};
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+use rustc_codegen_ssa::traits::BaseTypeCodegenMethods;
+use rustc_middle::ty::Ty;
+use rustc_span::Symbol;
+use rustc_target::callconv::FnAbi;
+
+use crate::abi::{FnAbiGcc, FnAbiGccExt};
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn get_or_insert_global(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        is_tls: bool,
+        link_section: Option<Symbol>,
+    ) -> LValue<'gcc> {
+        if self.globals.borrow().contains_key(name) {
+            let typ = self.globals.borrow()[name].get_type();
+            let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
+            if is_tls {
+                global.set_tls_model(self.tls_model);
+            }
+            if let Some(link_section) = link_section {
+                global.set_link_section(link_section.as_str());
+            }
+            global
+        } else {
+            self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
+        }
+    }
+
+    pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
+        let name = self.generate_local_symbol_name("global");
+        self.context.new_global(None, GlobalKind::Internal, ty, name)
+    }
+
+    pub fn declare_global_with_linkage(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        linkage: GlobalKind,
+    ) -> LValue<'gcc> {
+        let global = self.context.new_global(None, linkage, ty, name);
+        let global_address = global.get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global_address);
+        global
+    }
+
+    pub fn declare_func(
+        &self,
+        name: &str,
+        return_type: Type<'gcc>,
+        params: &[Type<'gcc>],
+        variadic: bool,
+    ) -> Function<'gcc> {
+        self.linkage.set(FunctionType::Extern);
+        declare_raw_fn(self, name, None, return_type, params, variadic)
+    }
+
+    pub fn declare_global(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        global_kind: GlobalKind,
+        is_tls: bool,
+        link_section: Option<Symbol>,
+    ) -> LValue<'gcc> {
+        let global = self.context.new_global(None, global_kind, ty, name);
+        if is_tls {
+            global.set_tls_model(self.tls_model);
+        }
+        if let Some(link_section) = link_section {
+            global.set_link_section(link_section.as_str());
+        }
+        let global_address = global.get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global_address);
+        global
+    }
+
+    pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> {
+        let global = self.context.new_global(None, GlobalKind::Internal, ty, name);
+        let global_address = global.get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global_address);
+        global
+    }
+
+    pub fn declare_entry_fn(
+        &self,
+        name: &str,
+        _fn_type: Type<'gcc>,
+        #[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
+        #[cfg(not(feature = "master"))] callconv: Option<()>,
+    ) -> RValue<'gcc> {
+        // TODO(antoyo): use the fn_type parameter.
+        let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
+        let return_type = self.type_i32();
+        let variadic = false;
+        self.linkage.set(FunctionType::Exported);
+        let func = declare_raw_fn(
+            self,
+            name,
+            callconv,
+            return_type,
+            &[self.type_i32(), const_string],
+            variadic,
+        );
+        // NOTE: the current_func must also be set here, because get_fn() is not called
+        // for the main function.
+        *self.current_func.borrow_mut() = Some(func);
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+        unsafe { std::mem::transmute(func) }
+    }
+
+    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Function<'gcc> {
+        let FnAbiGcc {
+            return_type,
+            arguments_type,
+            is_c_variadic,
+            on_stack_param_indices,
+            #[cfg(feature = "master")]
+            fn_attributes,
+        } = fn_abi.gcc_type(self);
+        #[cfg(feature = "master")]
+        let conv = fn_abi.gcc_cconv(self);
+        #[cfg(not(feature = "master"))]
+        let conv = None;
+        let func = declare_raw_fn(self, name, conv, return_type, &arguments_type, is_c_variadic);
+        self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
+        #[cfg(feature = "master")]
+        for fn_attr in fn_attributes {
+            func.add_attribute(fn_attr);
+        }
+        func
+    }
+
+    pub fn define_global(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        is_tls: bool,
+        link_section: Option<Symbol>,
+    ) -> LValue<'gcc> {
+        self.get_or_insert_global(name, ty, is_tls, link_section)
+    }
+
+    pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
+        // TODO(antoyo): use a different field than globals, because this seems to return a function?
+        self.globals.borrow().get(name).cloned()
+    }
+}
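+
+// NOTE: a minimal usage sketch (illustrative only, not part of this crate), assuming a
+// hypothetical `cx: &CodegenCx<'gcc, 'tcx>` binding:
+/*
+// The first call declares an exported global named "counter"; subsequent calls with the
+// same name import the existing declaration instead of redefining it.
+let global = cx.get_or_insert_global("counter", cx.type_i32(), /* is_tls */ false, None);
+*/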
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing Value instead.
+fn declare_raw_fn<'gcc>(
+    cx: &CodegenCx<'gcc, '_>,
+    name: &str,
+    #[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
+    #[cfg(not(feature = "master"))] _callconv: Option<()>,
+    return_type: Type<'gcc>,
+    param_types: &[Type<'gcc>],
+    variadic: bool,
+) -> Function<'gcc> {
+    if name.starts_with("llvm.") {
+        let intrinsic = match name {
+            "llvm.fma.f16" => {
+                // fma is not a target builtin, but a normal builtin, so we handle it differently
+                // here.
+                cx.context.get_builtin_function("fma")
+            }
+            _ => llvm::intrinsic(name, cx),
+        };
+
+        cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic);
+        return intrinsic;
+    }
+    let func = if cx.functions.borrow().contains_key(name) {
+        cx.functions.borrow()[name]
+    } else {
+        let params: Vec<_> = param_types
+            .iter()
+            .enumerate()
+            .map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // TODO(antoyo): set name.
+            .collect();
+        #[cfg(not(feature = "master"))]
+        let name = &mangle_name(name);
+        let func =
+            cx.context.new_function(None, cx.linkage.get(), return_type, &params, name, variadic);
+        #[cfg(feature = "master")]
+        if let Some(attribute) = callconv {
+            func.add_attribute(attribute);
+        }
+        cx.functions.borrow_mut().insert(name.to_string(), func);
+
+        #[cfg(feature = "master")]
+        if name == "rust_eh_personality" {
+            // NOTE: GCC will sometimes change the personality function set on a function from
+            // rust_eh_personality to __gcc_personality_v0 as an optimization.
+            // As such, we need to create a weak alias from __gcc_personality_v0 to
+            // rust_eh_personality in order to avoid a linker error.
+            // This needs to be weak in order to still allow using the standard
+            // __gcc_personality_v0 when linking to it.
+            // Since aliases don't work (maybe because of a bug in LTO partitioning?), we
+            // create a wrapper function that calls rust_eh_personality.
+
+            let params: Vec<_> = param_types
+                .iter()
+                .enumerate()
+                .map(|(index, param)| {
+                    cx.context.new_parameter(None, *param, format!("param{}", index))
+                }) // TODO(antoyo): set name.
+                .collect();
+            let gcc_func = cx.context.new_function(
+                None,
+                FunctionType::Exported,
+                return_type,
+                &params,
+                "__gcc_personality_v0",
+                variadic,
+            );
+
+            // We need a normal extern function for the crates that access rust_eh_personality
+            // without defining it, otherwise we'll get a compiler error.
+            //
+            // For the crate defining it, that needs to be a weak alias instead.
+            gcc_func.add_attribute(FnAttribute::Weak);
+
+            let block = gcc_func.new_block("start");
+            let mut args = vec![];
+            for param in &params {
+                args.push(param.to_rvalue());
+            }
+            let call = cx.context.new_call(None, func, &args);
+            if return_type == cx.type_void() {
+                block.add_eval(None, call);
+                block.end_with_void_return(None);
+            } else {
+                block.end_with_return(None, call);
+            }
+        }
+
+        func
+    };
+
+    // TODO(antoyo): set function calling convention.
+    // TODO(antoyo): set unnamed address.
+    // TODO(antoyo): set no red zone function attribute.
+    // TODO(antoyo): set attributes for optimisation.
+    // TODO(antoyo): set attributes for non lazy bind.
+
+    // FIXME(antoyo): invalid cast.
+    func
+}
+
+// FIXME(antoyo): this is a hack because libgccjit currently only supports alphanumeric
+// characters and `_` in symbol names. Unsupported characters: `$`, `.` and `*`.
+// FIXME(antoyo): `*` might not be expected: https://github.com/rust-lang/rust/issues/116979#issuecomment-1840926865
+#[cfg(not(feature = "master"))]
+fn mangle_name(name: &str) -> String {
+    name.replace(
+        |char: char| {
+            if !char.is_alphanumeric() && char != '_' {
+                debug_assert!(
+                    "$.*".contains(char),
+                    "Unsupported char in function name {}: {}",
+                    name,
+                    char
+                );
+                true
+            } else {
+                false
+            }
+        },
+        "_",
+    )
+}
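+
+// NOTE: illustrative example (not in the original source): with the fallback mangling above,
+// a name like "foo$bar.baz" becomes "foo_bar_baz".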
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
new file mode 100644
index 00000000000..1b59b9ac169
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -0,0 +1,79 @@
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unknown_ctarget_feature_prefix)]
+#[note]
+pub(crate) struct UnknownCTargetFeaturePrefix<'a> {
+    pub feature: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unknown_ctarget_feature)]
+#[note]
+pub(crate) struct UnknownCTargetFeature<'a> {
+    pub feature: &'a str,
+    #[subdiagnostic]
+    pub rust_feature: PossibleFeature<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unstable_ctarget_feature)]
+#[note]
+pub(crate) struct UnstableCTargetFeature<'a> {
+    pub feature: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_forbidden_ctarget_feature)]
+pub(crate) struct ForbiddenCTargetFeature<'a> {
+    pub feature: &'a str,
+    pub enabled: &'a str,
+    pub reason: &'a str,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum PossibleFeature<'a> {
+    #[help(codegen_gcc_possible_feature)]
+    Some { rust_feature: &'a str },
+    #[help(codegen_gcc_consider_filing_feature_request)]
+    None,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unwinding_inline_asm)]
+pub(crate) struct UnwindingInlineAsm {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_minimum_alignment)]
+pub(crate) struct InvalidMinimumAlignment {
+    pub err: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_copy_bitcode)]
+pub(crate) struct CopyBitcode {
+    pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_dynamic_linking_with_lto)]
+#[note]
+pub(crate) struct DynamicLinkingWithLTO;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_disallowed)]
+pub(crate) struct LtoDisallowed;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_dylib)]
+pub(crate) struct LtoDylib;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_bitcode_from_rlib)]
+pub(crate) struct LtoBitcodeFromRlib {
+    pub gcc_err: String,
+}
diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs
new file mode 100644
index 00000000000..202764d5649
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs
@@ -0,0 +1,223 @@
+#[cfg(feature = "master")]
+use gccjit::Context;
+use rustc_codegen_ssa::codegen_attrs::check_tied_features;
+use rustc_codegen_ssa::errors::TargetFeatureDisableOrEnable;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::unord::UnordSet;
+use rustc_session::Session;
+use rustc_target::target_features::RUSTC_SPECIFIC_FEATURES;
+use smallvec::{SmallVec, smallvec};
+
+use crate::errors::{
+    ForbiddenCTargetFeature, PossibleFeature, UnknownCTargetFeature, UnknownCTargetFeaturePrefix,
+    UnstableCTargetFeature,
+};
+
+/// The list of GCC features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+/// `--target` and similar).
+pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<String> {
+    // Features that come earlier are overridden by conflicting features later in the string.
+    // Typically we'll want more explicit settings to override the implicit ones, so:
+    //
+    // * Features from -Ctarget-cpu=*; are overridden by [^1]
+    // * Features implied by --target; are overridden by
+    // * Features from -Ctarget-feature; are overridden by
+    // * function specific features.
+    //
+    // [^1]: target-cpu=native is handled here, other target-cpu values are handled implicitly
+    // through GCC march implementation.
+    //
+    // FIXME(nagisa): it isn't clear what the best interaction between features implied by
+    // `-Ctarget-cpu` and `--target` is. On one hand, you'd expect CLI arguments to always
+    // override anything that's implicit, so e.g. when there's no `--target` flag, features implied
+    // by the host target are overridden by `-Ctarget-cpu=*`. On the other hand, what about when both
+    // `--target` and `-Ctarget-cpu=*` are specified? Both then imply some target features and both
+    // flags are specified by the user on the CLI. It isn't as clear-cut which order of precedence
+    // should be taken in cases like these.
+    let mut features = vec![];
+
+    // Features implied by an implicit or explicit `--target`.
+    features.extend(sess.target.features.split(',').filter(|v| !v.is_empty()).map(String::from));
+
+    // -Ctarget-feature
+    let known_features = sess.target.rust_target_features();
+    let mut featsmap = FxHashMap::default();
+
+    // Compute implied features
+    let mut all_rust_features = vec![];
+    for feature in sess.opts.cg.target_feature.split(',') {
+        if let Some(feature) = feature.strip_prefix('+') {
+            all_rust_features.extend(
+                UnordSet::from(sess.target.implied_target_features(feature))
+                    .to_sorted_stable_ord()
+                    .iter()
+                    .map(|&&s| (true, s)),
+            )
+        } else if let Some(feature) = feature.strip_prefix('-') {
+            // FIXME: Why do we not remove implied features on "-" here?
+            // We do the equivalent above in `target_features_cfg`.
+            // See <https://github.com/rust-lang/rust/issues/134792>.
+            all_rust_features.push((false, feature));
+        } else if !feature.is_empty() && diagnostics {
+            sess.dcx().emit_warn(UnknownCTargetFeaturePrefix { feature });
+        }
+    }
+    // Remove features that are meant for rustc, not codegen.
+    all_rust_features.retain(|&(_, feature)| {
+        // Retain if it is not a rustc feature
+        !RUSTC_SPECIFIC_FEATURES.contains(&feature)
+    });
+
+    // Check feature validity.
+    if diagnostics {
+        for &(enable, feature) in &all_rust_features {
+            let feature_state = known_features.iter().find(|&&(v, _, _)| v == feature);
+            match feature_state {
+                None => {
+                    let rust_feature = known_features.iter().find_map(|&(rust_feature, _, _)| {
+                        let gcc_features = to_gcc_features(sess, rust_feature);
+                        if gcc_features.contains(&feature) && !gcc_features.contains(&rust_feature)
+                        {
+                            Some(rust_feature)
+                        } else {
+                            None
+                        }
+                    });
+                    let unknown_feature = if let Some(rust_feature) = rust_feature {
+                        UnknownCTargetFeature {
+                            feature,
+                            rust_feature: PossibleFeature::Some { rust_feature },
+                        }
+                    } else {
+                        UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
+                    };
+                    sess.dcx().emit_warn(unknown_feature);
+                }
+                Some(&(_, stability, _)) => {
+                    if let Err(reason) = stability.toggle_allowed() {
+                        sess.dcx().emit_warn(ForbiddenCTargetFeature {
+                            feature,
+                            enabled: if enable { "enabled" } else { "disabled" },
+                            reason,
+                        });
+                    } else if stability.requires_nightly().is_some() {
+                        // An unstable feature. Warn about using it. (It makes little sense
+                        // to hard-error here since we just warn about fully unknown
+                        // features above).
+                        sess.dcx().emit_warn(UnstableCTargetFeature { feature });
+                    }
+                }
+            }
+
+            // FIXME(nagisa): figure out how to not allocate a full hashset here.
+            featsmap.insert(feature, enable);
+        }
+    }
+
+    // Translate this into GCC features.
+    let feats =
+        all_rust_features.iter().flat_map(|&(enable, feature)| {
+            let enable_disable = if enable { '+' } else { '-' };
+            // We run through `to_gcc_features` when
+            // passing requests down to GCC. This means that all in-language
+            // features also work on the command line instead of having two
+            // different names when the GCC name and the Rust name differ.
+            to_gcc_features(sess, feature)
+                .iter()
+                .flat_map(|feat| to_gcc_features(sess, feat).into_iter())
+                .map(|feature| {
+                    if enable_disable == '-' {
+                        format!("-{}", feature)
+                    } else {
+                        feature.to_string()
+                    }
+                })
+                .collect::<Vec<_>>()
+        });
+    features.extend(feats);
+
+    if diagnostics {
+        if let Some(f) = check_tied_features(sess, &featsmap) {
+            sess.dcx().emit_err(TargetFeatureDisableOrEnable {
+                features: f,
+                span: None,
+                missing_features: None,
+            });
+        }
+    }
+
+    features
+}
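+
+// NOTE: illustrative sketch (not in the original source): with `-Ctarget-feature=+sse4.2,-avx`
+// on x86, the returned list starts with the target's baseline features, followed by the GCC
+// names for sse4.2 and the features it implies (including "crc32"), and finally "-avx";
+// later entries override earlier conflicting ones.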
+
+// To find a list of GCC's names, check https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
+    let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
+    match (arch, s) {
+        // FIXME: seems like x87 does not exist?
+        ("x86", "x87") => smallvec![],
+        ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"],
+        ("x86", "pclmulqdq") => smallvec!["pclmul"],
+        ("x86", "rdrand") => smallvec!["rdrnd"],
+        ("x86", "bmi1") => smallvec!["bmi"],
+        ("x86", "cmpxchg16b") => smallvec!["cx16"],
+        ("x86", "avx512vaes") => smallvec!["vaes"],
+        ("x86", "avx512gfni") => smallvec!["gfni"],
+        ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
+        // NOTE: seems like GCC requires 'avx512bw' for 'avx512vbmi2'.
+        ("x86", "avx512vbmi2") => smallvec!["avx512vbmi2", "avx512bw"],
+        // NOTE: seems like GCC requires 'avx512bw' for 'avx512bitalg'.
+        ("x86", "avx512bitalg") => smallvec!["avx512bitalg", "avx512bw"],
+        ("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
+        ("aarch64", "dpb") => smallvec!["ccpp"],
+        ("aarch64", "dpb2") => smallvec!["ccdp"],
+        ("aarch64", "frintts") => smallvec!["fptoint"],
+        ("aarch64", "fcma") => smallvec!["complxnum"],
+        ("aarch64", "pmuv3") => smallvec!["perfmon"],
+        ("aarch64", "paca") => smallvec!["pauth"],
+        ("aarch64", "pacg") => smallvec!["pauth"],
+        // Rust ties fp and neon together. In GCC neon implicitly enables fp,
+        // but we manually enable neon when a feature only implicitly enables fp.
+        ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
+        ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
+        ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
+        ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
+        ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
+        ("aarch64", "sve") => smallvec!["sve", "neon"],
+        ("aarch64", "sve2") => smallvec!["sve2", "neon"],
+        ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
+        ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
+        ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
+        ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
+        (_, s) => smallvec![s],
+    }
+}
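+
+// NOTE: illustrative examples (not in the original source): on x86,
+// to_gcc_features(sess, "sse4.2") yields ["sse4.2", "crc32"] and
+// to_gcc_features(sess, "rdrand") yields ["rdrnd"]; names with no special mapping
+// pass through unchanged.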
+
+fn arch_to_gcc(name: &str) -> &str {
+    match name {
+        "M68000" => "68000",
+        "M68020" => "68020",
+        _ => name,
+    }
+}
+
+fn handle_native(name: &str) -> &str {
+    if name != "native" {
+        return arch_to_gcc(name);
+    }
+
+    #[cfg(feature = "master")]
+    {
+        // Get the native arch.
+        let context = Context::default();
+        context.get_target_info().arch().unwrap().to_str().unwrap()
+    }
+    #[cfg(not(feature = "master"))]
+    unimplemented!();
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+    match sess.opts.cg.target_cpu {
+        Some(ref name) => handle_native(name),
+        None => handle_native(sess.target.cpu.as_ref()),
+    }
+}
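+
+// NOTE: illustrative behavior (not in the original source): `-Ctarget-cpu=native` resolves to
+// the host architecture via libgccjit's target info (with the "master" feature), while CPU
+// names like "M68000" are rewritten to GCC's spelling "68000".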
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
new file mode 100644
index 00000000000..906d7eaceb6
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -0,0 +1,1038 @@
+//! Module to handle integer operations.
+//! This module exists because some integer types are not supported on some gcc platforms, e.g.
+//! 128-bit integers on 32-bit platforms, and thus need to be handled manually.
+
+use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
+use rustc_abi::{Endian, ExternAbi};
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, BuilderMethods, OverflowOp};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::callconv::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode};
+
+use crate::builder::{Builder, ToGccComp};
+use crate::common::{SignType, TypeReflection};
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // 128-bit unsigned %: __umodti3
+        self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
+    }
+
+    pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // 128-bit signed %:   __modti3
+        self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
+    }
+
+    pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+        let typ = a.get_type();
+        if self.is_native_int_type_or_bool(typ) {
+            let operation =
+                if typ.is_bool() { UnaryOp::LogicalNegate } else { UnaryOp::BitwiseNegate };
+            self.cx.context.new_unary_op(self.location, operation, typ, a)
+        } else {
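+            // Emulated wide integers are stored as an array of two native halves:
+            // bitwise-negate the low and high halves separately.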
+            let element_type = typ.dyncast_array().expect("element type");
+            self.concat_low_high_rvalues(
+                typ,
+                self.cx.context.new_unary_op(
+                    self.location,
+                    UnaryOp::BitwiseNegate,
+                    element_type,
+                    self.low(a),
+                ),
+                self.cx.context.new_unary_op(
+                    self.location,
+                    UnaryOp::BitwiseNegate,
+                    element_type,
+                    self.high(a),
+                ),
+            )
+        }
+    }
+
+    pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        if self.is_native_int_type(a_type) || a_type.is_vector() {
+            self.cx.context.new_unary_op(self.location, UnaryOp::Minus, a.get_type(), a)
+        } else {
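+            // Emulated wide integers: use the two's complement identity `-a == !a + 1`.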
+            self.gcc_add(self.gcc_not(a), self.gcc_int(a_type, 1))
+        }
+    }
+
+    pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b, self.location)
+    }
+
+    pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        let a_native = self.is_native_int_type(a_type);
+        let b_native = self.is_native_int_type(b_type);
+        if a_native && b_native {
+            // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
+            // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
+            if a_type.is_signed(self) != b_type.is_signed(self) {
+                let b = self.context.new_cast(self.location, b, a_type);
+                a >> b
+            } else {
+                let a_size = a_type.get_size();
+                let b_size = b_type.get_size();
+                match a_size.cmp(&b_size) {
+                    std::cmp::Ordering::Less => {
+                        let a = self.context.new_cast(self.location, a, b_type);
+                        a >> b
+                    }
+                    std::cmp::Ordering::Equal => a >> b,
+                    std::cmp::Ordering::Greater => {
+                        let b = self.context.new_cast(self.location, b, a_type);
+                        a >> b
+                    }
+                }
+            }
+        } else if a_type.is_vector() && b_type.is_vector() {
+            a >> b
+        } else if a_native && !b_native {
+            self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
+        } else {
+            // NOTE: we cannot use the lshr builtin because it calls hi() (to get the most
+            // significant half of the number), which itself uses lshr.
+
+            let native_int_type = a_type.dyncast_array().expect("get element type");
+
+            let func = self.current_func();
+            let then_block = func.new_block("then");
+            let else_block = func.new_block("else");
+            let after_block = func.new_block("after");
+            let b0_block = func.new_block("b0");
+            let actual_else_block = func.new_block("actual_else");
+
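+            // The shift is split into three cases based on the shift amount b:
+            // - b & 64 != 0 (i.e. b >= 64): the low half receives high(a) >> (b - 64) and the
+            //   high half becomes the sign extension, or zero when unsigned (then_block);
+            // - b == 0: the value is returned unchanged (b0_block);
+            // - 0 < b < 64: both halves are shifted and the bits shifted out of the high half
+            //   are carried into the low half (actual_else_block).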
+            let result = func.new_local(self.location, a_type, "shiftResult");
+
+            let sixty_four = self.gcc_int(native_int_type, 64);
+            let sixty_three = self.gcc_int(native_int_type, 63);
+            let zero = self.gcc_zero(native_int_type);
+            let b = self.gcc_int_cast(b, native_int_type);
+            let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+            self.llbb().end_with_conditional(self.location, condition, then_block, else_block);
+
+            let shift_value = self.gcc_sub(b, sixty_four);
+            let high = self.high(a);
+            let sign = if a_type.is_signed(self) { high >> sixty_three } else { zero };
+            let array_value = self.concat_low_high_rvalues(a_type, high >> shift_value, sign);
+            then_block.add_assignment(self.location, result, array_value);
+            then_block.end_with_jump(self.location, after_block);
+
+            let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+            else_block.end_with_conditional(self.location, condition, b0_block, actual_else_block);
+
+            b0_block.add_assignment(self.location, result, a);
+            b0_block.end_with_jump(self.location, after_block);
+
+            let shift_value = self.gcc_sub(sixty_four, b);
+            // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+            let unsigned_type = native_int_type.to_unsigned(self.cx);
+            let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
+            let shifted_low = casted_low >> self.context.new_cast(self.location, b, unsigned_type);
+            let shifted_low = self.context.new_cast(self.location, shifted_low, native_int_type);
+            let array_value = self.concat_low_high_rvalues(
+                a_type,
+                (high << shift_value) | shifted_low,
+                high >> b,
+            );
+            actual_else_block.add_assignment(self.location, result, array_value);
+            actual_else_block.end_with_jump(self.location, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not expect, the current block
+            // in the state needs to be updated.
+            self.switch_to_block(after_block);
+
+            result.to_rvalue()
+        }
+    }
+
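+    /// Performs an addition or a subtraction. Native integer and vector types use a plain
+    /// binary operation; the emulated 128-bit types are handled by calling the
+    /// `__rust_{i,u}128_{add,sub}` helper functions.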
+    fn additive_operation(
+        &self,
+        operation: BinaryOp,
+        a: RValue<'gcc>,
+        mut b: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type))
+            || (a_type.is_vector() && b_type.is_vector())
+        {
+            if a_type != b_type {
+                if a_type.is_vector() {
+                    // Vector types need to be bitcast.
+                    // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+                    b = self.context.new_bitcast(self.location, b, a.get_type());
+                } else {
+                    b = self.context.new_cast(self.location, b, a.get_type());
+                }
+            }
+            self.context.new_binary_op(self.location, operation, a_type, a, b)
+        } else {
+            debug_assert!(a_type.dyncast_array().is_some());
+            debug_assert!(b_type.dyncast_array().is_some());
+            let signed = a_type.is_compatible_with(self.i128_type);
+            let func_name = match (operation, signed) {
+                (BinaryOp::Plus, true) => "__rust_i128_add",
+                (BinaryOp::Plus, false) => "__rust_u128_add",
+                (BinaryOp::Minus, true) => "__rust_i128_sub",
+                (BinaryOp::Minus, false) => "__rust_u128_sub",
+                _ => unreachable!("unexpected additive operation {:?}", operation),
+            };
+            let param_a = self.context.new_parameter(self.location, a_type, "a");
+            let param_b = self.context.new_parameter(self.location, b_type, "b");
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                a_type,
+                &[param_a, param_b],
+                func_name,
+                false,
+            );
+            self.context.new_call(self.location, func, &[a, b])
+        }
+    }
+
+    pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.additive_operation(BinaryOp::Plus, a, b)
+    }
+
+    pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
+    }
+
+    pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.additive_operation(BinaryOp::Minus, a, b)
+    }
+
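+    /// Performs a multiplication, division, or remainder operation. Native integer and vector
+    /// types use a plain binary operation; the emulated 128-bit types are handled by calling
+    /// the matching libgcc routine, whose name is built as `__{u}{mul,div,mod}ti3`.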
+    fn multiplicative_operation(
+        &self,
+        operation: BinaryOp,
+        operation_name: &str,
+        signed: bool,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type))
+            || (a_type.is_vector() && b_type.is_vector())
+        {
+            self.context.new_binary_op(self.location, operation, a_type, a, b)
+        } else {
+            debug_assert!(a_type.dyncast_array().is_some());
+            debug_assert!(b_type.dyncast_array().is_some());
+            let sign = if signed { "" } else { "u" };
+            let func_name = format!("__{}{}ti3", sign, operation_name);
+            let param_a = self.context.new_parameter(self.location, a_type, "a");
+            let param_b = self.context.new_parameter(self.location, b_type, "b");
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                a_type,
+                &[param_a, param_b],
+                func_name,
+                false,
+            );
+            self.context.new_call(self.location, func, &[a, b])
+        }
+    }
+
+    pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check if the types are signed?
+        // 128-bit, signed: __divti3
+        // TODO(antoyo): convert the arguments to signed?
+        self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
+    }
+
+    pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // 128-bit, unsigned: __udivti3
+        self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
+    }
+
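+    /// Performs a checked arithmetic operation, returning the result together with an overflow
+    /// flag. Native widths use GCC's `__builtin_*_overflow` builtins; operands without native
+    /// support fall back to library helpers via `operation_with_overflow`.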
+    pub fn gcc_checked_binop(
+        &self,
+        oop: OverflowOp,
+        typ: Ty<'_>,
+        lhs: <Self as BackendTypes>::Value,
+        rhs: <Self as BackendTypes>::Value,
+    ) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
+        use rustc_middle::ty::IntTy::*;
+        use rustc_middle::ty::UintTy::*;
+        use rustc_middle::ty::{Int, Uint};
+
+        let new_kind = match *typ.kind() {
+            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+            t @ (Uint(_) | Int(_)) => t,
+            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+        };
+
+        // TODO(antoyo): remove duplication with intrinsic?
+        let name = if self.is_native_int_type(lhs.get_type()) {
+            match oop {
+                OverflowOp::Add => match new_kind {
+                    Int(I8) => "__builtin_add_overflow",
+                    Int(I16) => "__builtin_add_overflow",
+                    Int(I32) => "__builtin_sadd_overflow",
+                    Int(I64) => "__builtin_saddll_overflow",
+                    Int(I128) => "__builtin_add_overflow",
+
+                    Uint(U8) => "__builtin_add_overflow",
+                    Uint(U16) => "__builtin_add_overflow",
+                    Uint(U32) => "__builtin_uadd_overflow",
+                    Uint(U64) => "__builtin_uaddll_overflow",
+                    Uint(U128) => "__builtin_add_overflow",
+
+                    _ => unreachable!(),
+                },
+                OverflowOp::Sub => match new_kind {
+                    Int(I8) => "__builtin_sub_overflow",
+                    Int(I16) => "__builtin_sub_overflow",
+                    Int(I32) => "__builtin_ssub_overflow",
+                    Int(I64) => "__builtin_ssubll_overflow",
+                    Int(I128) => "__builtin_sub_overflow",
+
+                    Uint(U8) => "__builtin_sub_overflow",
+                    Uint(U16) => "__builtin_sub_overflow",
+                    Uint(U32) => "__builtin_usub_overflow",
+                    Uint(U64) => "__builtin_usubll_overflow",
+                    Uint(U128) => "__builtin_sub_overflow",
+
+                    _ => unreachable!(),
+                },
+                OverflowOp::Mul => match new_kind {
+                    Int(I8) => "__builtin_mul_overflow",
+                    Int(I16) => "__builtin_mul_overflow",
+                    Int(I32) => "__builtin_smul_overflow",
+                    Int(I64) => "__builtin_smulll_overflow",
+                    Int(I128) => "__builtin_mul_overflow",
+
+                    Uint(U8) => "__builtin_mul_overflow",
+                    Uint(U16) => "__builtin_mul_overflow",
+                    Uint(U32) => "__builtin_umul_overflow",
+                    Uint(U64) => "__builtin_umulll_overflow",
+                    Uint(U128) => "__builtin_mul_overflow",
+
+                    _ => unreachable!(),
+                },
+            }
+        } else {
+            let (func_name, width) = match oop {
+                OverflowOp::Add => match new_kind {
+                    Int(I128) => ("__rust_i128_addo", 128),
+                    Uint(U128) => ("__rust_u128_addo", 128),
+                    _ => unreachable!(),
+                },
+                OverflowOp::Sub => match new_kind {
+                    Int(I128) => ("__rust_i128_subo", 128),
+                    Uint(U128) => ("__rust_u128_subo", 128),
+                    _ => unreachable!(),
+                },
+                OverflowOp::Mul => match new_kind {
+                    Int(I32) => ("__mulosi4", 32),
+                    Int(I64) => ("__mulodi4", 64),
+                    Int(I128) => ("__rust_i128_mulo", 128), // TODO(antoyo): use __muloti4d instead?
+                    Uint(U128) => ("__rust_u128_mulo", 128),
+                    _ => unreachable!(),
+                },
+            };
+            return self.operation_with_overflow(func_name, lhs, rhs, width);
+        };
+
+        let intrinsic = self.context.get_builtin_function(name);
+        let res = self
+            .current_func()
+            // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
+            .new_local(self.location, rhs.get_type(), "binopResult")
+            .get_address(self.location);
+        let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
+        (res.dereference(self.location).to_rvalue(), overflow)
+    }
+
+    /// Non-`__builtin_*` overflow operations with a `fn(T, T, &mut i32) -> T` signature.
+    pub fn operation_with_overflow(
+        &self,
+        func_name: &str,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        width: u64,
+    ) -> (RValue<'gcc>, RValue<'gcc>) {
+        let a_type = lhs.get_type();
+        let b_type = rhs.get_type();
+        debug_assert!(a_type.dyncast_array().is_some());
+        debug_assert!(b_type.dyncast_array().is_some());
+        let overflow_type = self.i32_type;
+        let overflow_param_type = overflow_type.make_pointer();
+        let res_type = a_type;
+
+        let overflow_value =
+            self.current_func().new_local(self.location, overflow_type, "overflow");
+        let overflow_addr = overflow_value.get_address(self.location);
+
+        let param_a = self.context.new_parameter(self.location, a_type, "a");
+        let param_b = self.context.new_parameter(self.location, b_type, "b");
+        let param_overflow =
+            self.context.new_parameter(self.location, overflow_param_type, "overflow");
+
+        let a_elem_type = a_type.dyncast_array().expect("non-array a value");
+        debug_assert!(a_elem_type.is_integral());
+        let res_ty = match width {
+            32 => self.tcx.types.i32,
+            64 => self.tcx.types.i64,
+            128 => self.tcx.types.i128,
+            _ => unreachable!("unexpected integer size"),
+        };
+        let layout = self
+            .tcx
+            .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(res_ty))
+            .unwrap();
+
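+        // Build a minimal `fn(T, T, &mut i32) -> T` FnAbi and let the target adjust it, so that
+        // we can tell whether the 128-bit result is returned indirectly (e.g. through a pointer
+        // on win64) and declare the function with or without an out-parameter accordingly.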
+        let arg_abi = ArgAbi { layout, mode: PassMode::Direct(ArgAttributes::new()) };
+        let mut fn_abi = FnAbi {
+            args: vec![arg_abi.clone(), arg_abi.clone(), arg_abi.clone()].into_boxed_slice(),
+            ret: arg_abi,
+            c_variadic: false,
+            fixed_count: 3,
+            conv: Conv::C,
+            can_unwind: false,
+        };
+        fn_abi.adjust_for_foreign_abi(self.cx, ExternAbi::C { unwind: false });
+
+        let ret_indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
+
+        let call = if ret_indirect {
+            let res_value = self.current_func().new_local(self.location, res_type, "result_value");
+            let res_addr = res_value.get_address(self.location);
+            let res_param_type = res_type.make_pointer();
+            let param_res = self.context.new_parameter(self.location, res_param_type, "result");
+
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                self.type_void(),
+                &[param_res, param_a, param_b, param_overflow],
+                func_name,
+                false,
+            );
+            let _void =
+                self.context.new_call(self.location, func, &[res_addr, lhs, rhs, overflow_addr]);
+            res_value.to_rvalue()
+        } else {
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                res_type,
+                &[param_a, param_b, param_overflow],
+                func_name,
+                false,
+            );
+            self.context.new_call(self.location, func, &[lhs, rhs, overflow_addr])
+        };
+        // NOTE: we must assign the result of the operation to a variable at this point to make
+        // sure it will be evaluated by libgccjit now.
+        // Otherwise, it will only be evaluated when the rvalue for the call is used somewhere else
+        // and overflow_value will not be initialized at the correct point in the program.
+        let result = self.current_func().new_local(self.location, res_type, "result");
+        self.block.add_assignment(self.location, result, call);
+
+        (
+            result.to_rvalue(),
+            self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue(),
+        )
+    }
+
+    pub fn gcc_icmp(
+        &mut self,
+        op: IntPredicate,
+        mut lhs: RValue<'gcc>,
+        mut rhs: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        let a_type = lhs.get_type();
+        let b_type = rhs.get_type();
+        if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
+            // This algorithm is based on compiler-rt's __cmpti2:
+            // https://github.com/llvm-mirror/compiler-rt/blob/f0745e8476f069296a7c71accedd061dce4cdf79/lib/builtins/cmpti2.c#L21
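+            // `result` encodes a three-way ordering: 0 if lhs < rhs, 1 if lhs == rhs and 2 if
+            // lhs > rhs. The high halves are compared first; on a tie, the low halves are
+            // compared as unsigned values.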
+            let result = self.current_func().new_local(self.location, self.int_type, "icmp_result");
+            let block1 = self.current_func().new_block("block1");
+            let block2 = self.current_func().new_block("block2");
+            let block3 = self.current_func().new_block("block3");
+            let block4 = self.current_func().new_block("block4");
+            let block5 = self.current_func().new_block("block5");
+            let block6 = self.current_func().new_block("block6");
+            let block7 = self.current_func().new_block("block7");
+            let block8 = self.current_func().new_block("block8");
+            let after = self.current_func().new_block("after");
+
+            let native_int_type = a_type.dyncast_array().expect("get element type");
+            // NOTE: cast low to its unsigned type in order to perform a comparison correctly (e.g.
+            // the sign is only on high).
+            let unsigned_type = native_int_type.to_unsigned(self.cx);
+
+            let lhs_low = self.context.new_cast(self.location, self.low(lhs), unsigned_type);
+            let rhs_low = self.context.new_cast(self.location, self.low(rhs), unsigned_type);
+
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::LessThan,
+                self.high(lhs),
+                self.high(rhs),
+            );
+            self.llbb().end_with_conditional(self.location, condition, block1, block2);
+
+            block1.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_zero(self.int_type),
+            );
+            block1.end_with_jump(self.location, after);
+
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::GreaterThan,
+                self.high(lhs),
+                self.high(rhs),
+            );
+            block2.end_with_conditional(self.location, condition, block3, block4);
+
+            block3.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_from_int(self.int_type, 2),
+            );
+            block3.end_with_jump(self.location, after);
+
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::LessThan,
+                lhs_low,
+                rhs_low,
+            );
+            block4.end_with_conditional(self.location, condition, block5, block6);
+
+            block5.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_zero(self.int_type),
+            );
+            block5.end_with_jump(self.location, after);
+
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::GreaterThan,
+                lhs_low,
+                rhs_low,
+            );
+            block6.end_with_conditional(self.location, condition, block7, block8);
+
+            block7.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_from_int(self.int_type, 2),
+            );
+            block7.end_with_jump(self.location, after);
+
+            block8.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_one(self.int_type),
+            );
+            block8.end_with_jump(self.location, after);
+
+            // NOTE: since jumps were added in a place rustc does not expect, the current block
+            // in the state needs to be updated.
+            self.switch_to_block(after);
+
+            let cmp = result.to_rvalue();
+            let (op, limit) = match op {
+                IntPredicate::IntEQ => {
+                    return self.context.new_comparison(
+                        self.location,
+                        ComparisonOp::Equals,
+                        cmp,
+                        self.context.new_rvalue_one(self.int_type),
+                    );
+                }
+                IntPredicate::IntNE => {
+                    return self.context.new_comparison(
+                        self.location,
+                        ComparisonOp::NotEquals,
+                        cmp,
+                        self.context.new_rvalue_one(self.int_type),
+                    );
+                }
+                // TODO(antoyo): cast to u128 for unsigned comparison. See below.
+                IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
+                IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
+                IntPredicate::IntULT => (ComparisonOp::Equals, 0),
+                IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
+                IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
+                IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
+                IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
+                IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
+            };
+            self.context.new_comparison(
+                self.location,
+                op,
+                cmp,
+                self.context.new_rvalue_from_int(self.int_type, limit),
+            )
+        } else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() {
+            // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize.
+            lhs = self.context.new_bitcast(self.location, lhs, self.usize_type);
+            rhs = self.context.new_bitcast(self.location, rhs, self.usize_type);
+            self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
+        } else {
+            if a_type != b_type {
+                // NOTE: because libgccjit cannot compare function pointers.
+                if a_type.dyncast_function_ptr_type().is_some()
+                    && b_type.dyncast_function_ptr_type().is_some()
+                {
+                    lhs = self.context.new_cast(self.location, lhs, self.usize_type.make_pointer());
+                    rhs = self.context.new_cast(self.location, rhs, self.usize_type.make_pointer());
+                }
+                // NOTE: hack to avoid casting a vector type to the same vector type: the type
+                // objects can compare unequal even when their representations are identical, so
+                // compare their debug output and only cast when it actually differs.
+                else if format!("{:?}", a_type) != format!("{:?}", b_type) {
+                    rhs = self.context.new_cast(self.location, rhs, a_type);
+                }
+            }
+            match op {
+                IntPredicate::IntUGT
+                | IntPredicate::IntUGE
+                | IntPredicate::IntULT
+                | IntPredicate::IntULE => {
+                    if !a_type.is_vector() {
+                        let unsigned_type = a_type.to_unsigned(self.cx);
+                        lhs = self.context.new_cast(self.location, lhs, unsigned_type);
+                        rhs = self.context.new_cast(self.location, rhs, unsigned_type);
+                    }
+                }
+                // TODO(antoyo): we probably need to handle signed comparison for unsigned
+                // integers.
+                _ => (),
+            }
+            self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
+        }
+    }
+
+    pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        if a_type.is_vector() && b_type.is_vector() {
+            let b = self.bitcast_if_needed(b, a_type);
+            a ^ b
+        } else if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)
+        {
+            a ^ b
+        } else {
+            self.concat_low_high_rvalues(
+                a_type,
+                self.low(a) ^ self.low(b),
+                self.high(a) ^ self.high(b),
+            )
+        }
+    }
+
+    pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        let a_native = self.is_native_int_type(a_type);
+        let b_native = self.is_native_int_type(b_type);
+        if a_native && b_native {
+            // FIXME(antoyo): remove the casts when libgccjit can shift a number by a number of a different signedness.
+            if a_type.is_unsigned(self) && b_type.is_signed(self) {
+                let a = self.context.new_cast(self.location, a, b_type);
+                let result = a << b;
+                self.context.new_cast(self.location, result, a_type)
+            } else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+                let b = self.context.new_cast(self.location, b, a_type);
+                a << b
+            } else {
+                let a_size = a_type.get_size();
+                let b_size = b_type.get_size();
+                match a_size.cmp(&b_size) {
+                    std::cmp::Ordering::Less => {
+                        let a = self.context.new_cast(self.location, a, b_type);
+                        a << b
+                    }
+                    std::cmp::Ordering::Equal => a << b,
+                    std::cmp::Ordering::Greater => {
+                        let b = self.context.new_cast(self.location, b, a_type);
+                        a << b
+                    }
+                }
+            }
+        } else if a_type.is_vector() && b_type.is_vector() {
+            a << b
+        } else if a_native && !b_native {
+            self.gcc_shl(a, self.gcc_int_cast(b, a_type))
+        } else {
+            // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
+            let native_int_type = a_type.dyncast_array().expect("get element type");
+
+            let func = self.current_func();
+            let then_block = func.new_block("then");
+            let else_block = func.new_block("else");
+            let after_block = func.new_block("after");
+            let b0_block = func.new_block("b0");
+            let actual_else_block = func.new_block("actual_else");
+
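+            // The shift is split into three cases based on the shift amount b:
+            // - b & 64 != 0 (i.e. b >= 64): the low half becomes zero and the high half receives
+            //   low(a) << (b - 64) (then_block);
+            // - b == 0: the value is returned unchanged (b0_block);
+            // - 0 < b < 64: the low half is shifted and the bits shifted out of it are carried
+            //   into the high half (actual_else_block).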
+            let result = func.new_local(self.location, a_type, "shiftResult");
+
+            let b = self.gcc_int_cast(b, native_int_type);
+            let sixty_four = self.gcc_int(native_int_type, 64);
+            let zero = self.gcc_zero(native_int_type);
+            let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+            self.llbb().end_with_conditional(self.location, condition, then_block, else_block);
+
+            let array_value =
+                self.concat_low_high_rvalues(a_type, zero, self.low(a) << (b - sixty_four));
+            then_block.add_assignment(self.location, result, array_value);
+            then_block.end_with_jump(self.location, after_block);
+
+            let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+            else_block.end_with_conditional(self.location, condition, b0_block, actual_else_block);
+
+            b0_block.add_assignment(self.location, result, a);
+            b0_block.end_with_jump(self.location, after_block);
+
+            // NOTE: cast low to its unsigned type so that the right shift below is logical; it
+            // extracts the bits of the low half that are carried into the high half.
+            let unsigned_type = native_int_type.to_unsigned(self.cx);
+            let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
+            let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type);
+            let high_low =
+                self.context.new_cast(self.location, casted_low >> shift_value, native_int_type);
+
+            let array_value = self.concat_low_high_rvalues(
+                a_type,
+                self.low(a) << b,
+                (self.high(a) << b) | high_low,
+            );
+            actual_else_block.add_assignment(self.location, result, array_value);
+            actual_else_block.end_with_jump(self.location, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not expect, the current block
+            // in the state needs to be updated.
+            self.switch_to_block(after_block);
+
+            result.to_rvalue()
+        }
+    }
+
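+    /// Byte-swaps an integer of the given bit `width`. For the emulated 128-bit representation,
+    /// each half is byte-swapped recursively and the two halves are then exchanged.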
+    pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+        let arg_type = arg.get_type();
+        if !self.is_native_int_type(arg_type) {
+            let native_int_type = arg_type.dyncast_array().expect("get element type");
+            let lsb = self.low(arg);
+            let swapped_lsb = self.gcc_bswap(lsb, width / 2);
+            let swapped_lsb = self.context.new_cast(self.location, swapped_lsb, native_int_type);
+            let msb = self.high(arg);
+            let swapped_msb = self.gcc_bswap(msb, width / 2);
+            let swapped_msb = self.context.new_cast(self.location, swapped_msb, native_int_type);
+
+            // NOTE: we also need to swap the two elements here, in addition to swapping inside
+            // the elements themselves like done above.
+            return self.concat_low_high_rvalues(arg_type, swapped_msb, swapped_lsb);
+        }
+
+        // TODO(antoyo): check if it's faster to use string literals and a
+        // match instead of format!.
+        let bswap = self.cx.context.get_builtin_function(format!("__builtin_bswap{}", width));
+        // FIXME(antoyo): this cast should not be necessary. Remove
+        // when having proper sized integer types.
+        let param_type = bswap.get_param(0).to_rvalue().get_type();
+        if param_type != arg_type {
+            arg = self.bitcast(arg, param_type);
+        }
+        self.cx.context.new_call(self.location, bswap, &[arg])
+    }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+        if self.is_native_int_type_or_bool(typ) {
+            self.context.new_rvalue_from_long(typ, int)
+        } else {
+            // NOTE: set the sign in high.
+            self.concat_low_high(typ, int, -(int.is_negative() as i64))
+        }
+    }
+
+    pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+        if typ.is_u128(self) {
+            // FIXME(antoyo): libgccjit cannot create 128-bit values yet.
+            let num = self.context.new_rvalue_from_long(self.u64_type, int as i64);
+            self.gcc_int_cast(num, typ)
+        } else if self.is_native_int_type_or_bool(typ) {
+            self.context.new_rvalue_from_long(typ, int as i64)
+        } else {
+            self.concat_low_high(typ, int as i64, 0)
+        }
+    }
+
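+    /// Creates a constant from a `u128`, working around libgccjit only accepting 64-bit
+    /// immediates: values that do not fit in 64 bits are assembled from their two halves.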
+    pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+        let low = num as u64;
+        let high = (num >> 64) as u64;
+        if num >> 64 != 0 {
+            // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
+            if self.is_native_int_type(typ) {
+                let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
+                let high = self.context.new_rvalue_from_long(typ, high as i64);
+
+                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+                let shift = high << sixty_four;
+                shift | self.context.new_cast(None, low, typ)
+            } else {
+                self.concat_low_high(typ, low as i64, high as i64)
+            }
+        } else if typ.is_i128(self) {
+            // FIXME(antoyo): libgccjit cannot create 128-bit values yet.
+            let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+            self.gcc_int_cast(num, typ)
+        } else {
+            self.gcc_uint(typ, num as u64)
+        }
+    }
+
+    pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        if self.is_native_int_type_or_bool(typ) {
+            self.context.new_rvalue_zero(typ)
+        } else {
+            self.concat_low_high(typ, 0, 0)
+        }
+    }
+
+    pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
+        if self.is_native_int_type_or_bool(typ) {
+            typ.get_size() as u64 * 8
+        } else {
+            // NOTE: the only unsupported types are u128 and i128.
+            128
+        }
+    }
+
+    fn bitwise_operation(
+        &self,
+        operation: BinaryOp,
+        a: RValue<'gcc>,
+        mut b: RValue<'gcc>,
+        loc: Option<Location<'gcc>>,
+    ) -> RValue<'gcc> {
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        let a_native = self.is_native_int_type_or_bool(a_type);
+        let b_native = self.is_native_int_type_or_bool(b_type);
+        if a_type.is_vector() && b_type.is_vector() {
+            let b = self.bitcast_if_needed(b, a_type);
+            self.context.new_binary_op(loc, operation, a_type, a, b)
+        } else if a_native && b_native {
+            if a_type != b_type {
+                b = self.context.new_cast(loc, b, a_type);
+            }
+            self.context.new_binary_op(loc, operation, a_type, a, b)
+        } else {
+            assert!(
+                !a_native && !b_native,
+                "both types should either be native or non-native for or operation"
+            );
+            let native_int_type = a_type.dyncast_array().expect("get element type");
+            self.concat_low_high_rvalues(
+                a_type,
+                self.context.new_binary_op(
+                    loc,
+                    operation,
+                    native_int_type,
+                    self.low(a),
+                    self.low(b),
+                ),
+                self.context.new_binary_op(
+                    loc,
+                    operation,
+                    native_int_type,
+                    self.high(a),
+                    self.high(b),
+                ),
+            )
+        }
+    }
+
+    pub fn gcc_or(
+        &self,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+        loc: Option<Location<'gcc>>,
+    ) -> RValue<'gcc> {
+        self.bitwise_operation(BinaryOp::BitwiseOr, a, b, loc)
+    }
+
+    // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
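+    /// Casts an integer to another integer type, handling the emulated 128-bit representation
+    /// on either side, including sign extension when widening a native value into it.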
+    pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        let value_type = value.get_type();
+        if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type)
+        {
+            // TODO: use self.location.
+            self.context.new_cast(None, value, dest_typ)
+        } else if self.is_native_int_type_or_bool(dest_typ) {
+            self.context.new_cast(None, self.low(value), dest_typ)
+        } else if self.is_native_int_type_or_bool(value_type) {
+            let dest_element_type = dest_typ.dyncast_array().expect("get element type");
+
+            // NOTE: set the sign of the value.
+            let zero = self.context.new_rvalue_zero(value_type);
+            let is_negative =
+                self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
+            let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
+            self.concat_low_high_rvalues(
+                dest_typ,
+                self.context.new_cast(None, value, dest_element_type),
+                self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
+            )
+        } else {
+            // Since u128 and i128 are the only types that can be unsupported, we know the type of
+            // value and the destination type have the same size, so a bitcast is fine.
+
+            // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+            self.context.new_bitcast(None, value, dest_typ)
+        }
+    }
+
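+    /// Converts an emulated 128-bit integer to a float by calling the matching libgcc routine
+    /// (`__float{un}ti{sf,df,tf}`); native integers are cast directly.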
+    fn int_to_float_cast(
+        &self,
+        signed: bool,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
+        let value_type = value.get_type();
+        if self.is_native_int_type_or_bool(value_type) {
+            return self.context.new_cast(None, value, dest_typ);
+        }
+
+        debug_assert!(value_type.dyncast_array().is_some());
+        let name_suffix = match self.type_kind(dest_typ) {
+            TypeKind::Float => "tisf",
+            TypeKind::Double => "tidf",
+            TypeKind::FP128 => "tixf",
+            kind => panic!("cannot cast a non-native integer to type {:?}", kind),
+        };
+        let sign = if signed { "" } else { "un" };
+        let func_name = format!("__float{}{}", sign, name_suffix);
+        let param = self.context.new_parameter(None, value_type, "n");
+        let func = self.context.new_function(
+            None,
+            FunctionType::Extern,
+            dest_typ,
+            &[param],
+            func_name,
+            false,
+        );
+        self.context.new_call(None, func, &[value])
+    }
+
+    pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        self.int_to_float_cast(true, value, dest_typ)
+    }
+
+    pub fn gcc_uint_to_float_cast(
+        &self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
+        self.int_to_float_cast(false, value, dest_typ)
+    }
+
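+    /// Converts a float to an emulated 128-bit integer by calling the matching libgcc routine
+    /// (`__fix{uns}{sf,df}ti`); conversions to native integers are done with a direct cast.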
+    fn float_to_int_cast(
+        &self,
+        signed: bool,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
+        let value_type = value.get_type();
+        if self.is_native_int_type_or_bool(dest_typ) {
+            return self.context.new_cast(None, value, dest_typ);
+        }
+
+        debug_assert!(dest_typ.dyncast_array().is_some());
+        let name_suffix = match self.type_kind(value_type) {
+            TypeKind::Float => "sfti",
+            TypeKind::Double => "dfti",
+            kind => panic!("cannot cast a {:?} to non-native integer", kind),
+        };
+        let sign = if signed { "" } else { "uns" };
+        let func_name = format!("__fix{}{}", sign, name_suffix);
+        let param = self.context.new_parameter(None, value_type, "n");
+        let func = self.context.new_function(
+            None,
+            FunctionType::Extern,
+            dest_typ,
+            &[param],
+            func_name,
+            false,
+        );
+        self.context.new_call(None, func, &[value])
+    }
+
+    pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        self.float_to_int_cast(true, value, dest_typ)
+    }
+
+    pub fn gcc_float_to_uint_cast(
+        &self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
+        self.float_to_int_cast(false, value, dest_typ)
+    }
+
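+    /// Returns the most significant half of the two-element array used to emulate a 128-bit
+    /// integer, taking the target's endianness into account.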
+    fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+        let index = match self.sess().target.options.endian {
+            Endian::Little => 1,
+            Endian::Big => 0,
+        };
+        self.context
+            .new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
+            .to_rvalue()
+    }
+
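+    /// Returns the least significant half of the two-element array used to emulate a 128-bit
+    /// integer, taking the target's endianness into account.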
+    fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+        let index = match self.sess().target.options.endian {
+            Endian::Little => 0,
+            Endian::Big => 1,
+        };
+        self.context
+            .new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
+            .to_rvalue()
+    }
+
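+    /// Builds an emulated 128-bit value from its two halves, ordered according to the target's
+    /// endianness.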
+    fn concat_low_high_rvalues(
+        &self,
+        typ: Type<'gcc>,
+        low: RValue<'gcc>,
+        high: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        let (first, last) = match self.sess().target.options.endian {
+            Endian::Little => (low, high),
+            Endian::Big => (high, low),
+        };
+
+        let values = [first, last];
+        self.context.new_array_constructor(None, typ, &values)
+    }
+
+    fn concat_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
+        let (first, last) = match self.sess().target.options.endian {
+            Endian::Little => (low, high),
+            Endian::Big => (high, low),
+        };
+
+        let native_int_type = typ.dyncast_array().expect("get element type");
+        let values = [
+            self.context.new_rvalue_from_long(native_int_type, first),
+            self.context.new_rvalue_from_long(native_int_type, last),
+        ];
+        self.context.new_array_constructor(None, typ, &values)
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
new file mode 100644
index 00000000000..b8d1cde1d5d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -0,0 +1,9696 @@
+// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`
+// DO NOT EDIT IT!
+match name {
+    // AMDGPU
+    "llvm.AMDGPU.div.fixup.f32" => "__builtin_amdgpu_div_fixup",
+    "llvm.AMDGPU.div.fixup.f64" => "__builtin_amdgpu_div_fixup",
+    "llvm.AMDGPU.div.fixup.v2f64" => "__builtin_amdgpu_div_fixup",
+    "llvm.AMDGPU.div.fixup.v4f32" => "__builtin_amdgpu_div_fixup",
+    "llvm.AMDGPU.div.fmas.f32" => "__builtin_amdgpu_div_fmas",
+    "llvm.AMDGPU.div.fmas.f64" => "__builtin_amdgpu_div_fmas",
+    "llvm.AMDGPU.div.fmas.v2f64" => "__builtin_amdgpu_div_fmas",
+    "llvm.AMDGPU.div.fmas.v4f32" => "__builtin_amdgpu_div_fmas",
+    "llvm.AMDGPU.ldexp.f32" => "__builtin_amdgpu_ldexp",
+    "llvm.AMDGPU.ldexp.f64" => "__builtin_amdgpu_ldexp",
+    "llvm.AMDGPU.ldexp.v2f64" => "__builtin_amdgpu_ldexp",
+    "llvm.AMDGPU.ldexp.v4f32" => "__builtin_amdgpu_ldexp",
+    "llvm.AMDGPU.rcp.f32" => "__builtin_amdgpu_rcp",
+    "llvm.AMDGPU.rcp.f64" => "__builtin_amdgpu_rcp",
+    "llvm.AMDGPU.rcp.v2f64" => "__builtin_amdgpu_rcp",
+    "llvm.AMDGPU.rcp.v4f32" => "__builtin_amdgpu_rcp",
+    "llvm.AMDGPU.rsq.clamped.f32" => "__builtin_amdgpu_rsq_clamped",
+    "llvm.AMDGPU.rsq.clamped.f64" => "__builtin_amdgpu_rsq_clamped",
+    "llvm.AMDGPU.rsq.clamped.v2f64" => "__builtin_amdgpu_rsq_clamped",
+    "llvm.AMDGPU.rsq.clamped.v4f32" => "__builtin_amdgpu_rsq_clamped",
+    "llvm.AMDGPU.rsq.f32" => "__builtin_amdgpu_rsq",
+    "llvm.AMDGPU.rsq.f64" => "__builtin_amdgpu_rsq",
+    "llvm.AMDGPU.rsq.v2f64" => "__builtin_amdgpu_rsq",
+    "llvm.AMDGPU.rsq.v4f32" => "__builtin_amdgpu_rsq",
+    "llvm.AMDGPU.trig.preop.f32" => "__builtin_amdgpu_trig_preop",
+    "llvm.AMDGPU.trig.preop.f64" => "__builtin_amdgpu_trig_preop",
+    "llvm.AMDGPU.trig.preop.v2f64" => "__builtin_amdgpu_trig_preop",
+    "llvm.AMDGPU.trig.preop.v4f32" => "__builtin_amdgpu_trig_preop",
+    // aarch64
+    "llvm.aarch64.chkfeat" => "__builtin_arm_chkfeat",
+    "llvm.aarch64.dmb" => "__builtin_arm_dmb",
+    "llvm.aarch64.dsb" => "__builtin_arm_dsb",
+    "llvm.aarch64.gcspopm" => "__builtin_arm_gcspopm",
+    "llvm.aarch64.gcsss" => "__builtin_arm_gcsss",
+    "llvm.aarch64.isb" => "__builtin_arm_isb",
+    "llvm.aarch64.prefetch" => "__builtin_arm_prefetch",
+    "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8",
+    "llvm.aarch64.sve.aese" => "__builtin_sve_svaese_u8",
+    "llvm.aarch64.sve.aesimc" => "__builtin_sve_svaesimc_u8",
+    "llvm.aarch64.sve.aesmc" => "__builtin_sve_svaesmc_u8",
+    "llvm.aarch64.sve.rax1" => "__builtin_sve_svrax1_u64",
+    "llvm.aarch64.sve.rdffr" => "__builtin_sve_svrdffr",
+    "llvm.aarch64.sve.rdffr.z" => "__builtin_sve_svrdffr_z",
+    "llvm.aarch64.sve.setffr" => "__builtin_sve_svsetffr",
+    "llvm.aarch64.sve.sm4e" => "__builtin_sve_svsm4e_u32",
+    "llvm.aarch64.sve.sm4ekey" => "__builtin_sve_svsm4ekey_u32",
+    "llvm.aarch64.sve.wrffr" => "__builtin_sve_svwrffr",
+    "llvm.aarch64.tcancel" => "__builtin_arm_tcancel",
+    "llvm.aarch64.tcommit" => "__builtin_arm_tcommit",
+    "llvm.aarch64.tstart" => "__builtin_arm_tstart",
+    "llvm.aarch64.ttest" => "__builtin_arm_ttest",
+    // amdgcn
+    "llvm.amdgcn.alignbyte" => "__builtin_amdgcn_alignbyte",
+    "llvm.amdgcn.buffer.wbinvl1" => "__builtin_amdgcn_buffer_wbinvl1",
+    "llvm.amdgcn.buffer.wbinvl1.sc" => "__builtin_amdgcn_buffer_wbinvl1_sc",
+    "llvm.amdgcn.buffer.wbinvl1.vol" => "__builtin_amdgcn_buffer_wbinvl1_vol",
+    "llvm.amdgcn.cubeid" => "__builtin_amdgcn_cubeid",
+    "llvm.amdgcn.cubema" => "__builtin_amdgcn_cubema",
+    "llvm.amdgcn.cubesc" => "__builtin_amdgcn_cubesc",
+    "llvm.amdgcn.cubetc" => "__builtin_amdgcn_cubetc",
+    "llvm.amdgcn.cvt.f32.bf8" => "__builtin_amdgcn_cvt_f32_bf8",
+    "llvm.amdgcn.cvt.f32.fp8" => "__builtin_amdgcn_cvt_f32_fp8",
+    "llvm.amdgcn.cvt.pk.bf8.f32" => "__builtin_amdgcn_cvt_pk_bf8_f32",
+    "llvm.amdgcn.cvt.pk.f32.bf8" => "__builtin_amdgcn_cvt_pk_f32_bf8",
+    "llvm.amdgcn.cvt.pk.f32.fp8" => "__builtin_amdgcn_cvt_pk_f32_fp8",
+    "llvm.amdgcn.cvt.pk.fp8.f32" => "__builtin_amdgcn_cvt_pk_fp8_f32",
+    "llvm.amdgcn.cvt.pk.i16" => "__builtin_amdgcn_cvt_pk_i16",
+    "llvm.amdgcn.cvt.pk.u16" => "__builtin_amdgcn_cvt_pk_u16",
+    "llvm.amdgcn.cvt.pk.u8.f32" => "__builtin_amdgcn_cvt_pk_u8_f32",
+    "llvm.amdgcn.cvt.pknorm.i16" => "__builtin_amdgcn_cvt_pknorm_i16",
+    "llvm.amdgcn.cvt.pknorm.u16" => "__builtin_amdgcn_cvt_pknorm_u16",
+    "llvm.amdgcn.cvt.pkrtz" => "__builtin_amdgcn_cvt_pkrtz",
+    "llvm.amdgcn.cvt.sr.bf8.f32" => "__builtin_amdgcn_cvt_sr_bf8_f32",
+    "llvm.amdgcn.cvt.sr.fp8.f32" => "__builtin_amdgcn_cvt_sr_fp8_f32",
+    "llvm.amdgcn.dispatch.id" => "__builtin_amdgcn_dispatch_id",
+    "llvm.amdgcn.dot4.f32.bf8.bf8" => "__builtin_amdgcn_dot4_f32_bf8_bf8",
+    "llvm.amdgcn.dot4.f32.bf8.fp8" => "__builtin_amdgcn_dot4_f32_bf8_fp8",
+    "llvm.amdgcn.dot4.f32.fp8.bf8" => "__builtin_amdgcn_dot4_f32_fp8_bf8",
+    "llvm.amdgcn.dot4.f32.fp8.fp8" => "__builtin_amdgcn_dot4_f32_fp8_fp8",
+    "llvm.amdgcn.ds.add.gs.reg.rtn" => "__builtin_amdgcn_ds_add_gs_reg_rtn",
+    "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute",
+    "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier",
+    "llvm.amdgcn.ds.gws.init" => "__builtin_amdgcn_ds_gws_init",
+    "llvm.amdgcn.ds.gws.sema.br" => "__builtin_amdgcn_ds_gws_sema_br",
+    "llvm.amdgcn.ds.gws.sema.p" => "__builtin_amdgcn_ds_gws_sema_p",
+    "llvm.amdgcn.ds.gws.sema.release.all" => "__builtin_amdgcn_ds_gws_sema_release_all",
+    "llvm.amdgcn.ds.gws.sema.v" => "__builtin_amdgcn_ds_gws_sema_v",
+    "llvm.amdgcn.ds.permute" => "__builtin_amdgcn_ds_permute",
+    "llvm.amdgcn.ds.sub.gs.reg.rtn" => "__builtin_amdgcn_ds_sub_gs_reg_rtn",
+    "llvm.amdgcn.ds.swizzle" => "__builtin_amdgcn_ds_swizzle",
+    "llvm.amdgcn.endpgm" => "__builtin_amdgcn_endpgm",
+    "llvm.amdgcn.fdot2" => "__builtin_amdgcn_fdot2",
+    "llvm.amdgcn.fdot2.bf16.bf16" => "__builtin_amdgcn_fdot2_bf16_bf16",
+    "llvm.amdgcn.fdot2.f16.f16" => "__builtin_amdgcn_fdot2_f16_f16",
+    "llvm.amdgcn.fdot2.f32.bf16" => "__builtin_amdgcn_fdot2_f32_bf16",
+    "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy",
+    "llvm.amdgcn.global.load.lds" => "__builtin_amdgcn_global_load_lds",
+    "llvm.amdgcn.groupstaticsize" => "__builtin_amdgcn_groupstaticsize",
+    "llvm.amdgcn.iglp.opt" => "__builtin_amdgcn_iglp_opt",
+    "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr",
+    "llvm.amdgcn.implicitarg.ptr" => "__builtin_amdgcn_implicitarg_ptr",
+    "llvm.amdgcn.interp.mov" => "__builtin_amdgcn_interp_mov",
+    "llvm.amdgcn.interp.p1" => "__builtin_amdgcn_interp_p1",
+    "llvm.amdgcn.interp.p1.f16" => "__builtin_amdgcn_interp_p1_f16",
+    "llvm.amdgcn.interp.p2" => "__builtin_amdgcn_interp_p2",
+    "llvm.amdgcn.interp.p2.f16" => "__builtin_amdgcn_interp_p2_f16",
+    "llvm.amdgcn.is.private" => "__builtin_amdgcn_is_private",
+    "llvm.amdgcn.is.shared" => "__builtin_amdgcn_is_shared",
+    "llvm.amdgcn.kernarg.segment.ptr" => "__builtin_amdgcn_kernarg_segment_ptr",
+    "llvm.amdgcn.lerp" => "__builtin_amdgcn_lerp",
+    "llvm.amdgcn.mbcnt.hi" => "__builtin_amdgcn_mbcnt_hi",
+    "llvm.amdgcn.mbcnt.lo" => "__builtin_amdgcn_mbcnt_lo",
+    "llvm.amdgcn.mfma.f32.16x16x16bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x16bf16_1k",
+    "llvm.amdgcn.mfma.f32.16x16x16f16" => "__builtin_amdgcn_mfma_f32_16x16x16f16",
+    "llvm.amdgcn.mfma.f32.16x16x1f32" => "__builtin_amdgcn_mfma_f32_16x16x1f32",
+    "llvm.amdgcn.mfma.f32.16x16x2bf16" => "__builtin_amdgcn_mfma_f32_16x16x2bf16",
+    "llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_bf8",
+    "llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_fp8",
+    "llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_bf8",
+    "llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_fp8",
+    "llvm.amdgcn.mfma.f32.16x16x4bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x4bf16_1k",
+    "llvm.amdgcn.mfma.f32.16x16x4f16" => "__builtin_amdgcn_mfma_f32_16x16x4f16",
+    "llvm.amdgcn.mfma.f32.16x16x4f32" => "__builtin_amdgcn_mfma_f32_16x16x4f32",
+    "llvm.amdgcn.mfma.f32.16x16x8.xf32" => "__builtin_amdgcn_mfma_f32_16x16x8_xf32",
+    "llvm.amdgcn.mfma.f32.16x16x8bf16" => "__builtin_amdgcn_mfma_f32_16x16x8bf16",
+    "llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_bf8",
+    "llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_fp8",
+    "llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_bf8",
+    "llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_fp8",
+    "llvm.amdgcn.mfma.f32.32x32x1f32" => "__builtin_amdgcn_mfma_f32_32x32x1f32",
+    "llvm.amdgcn.mfma.f32.32x32x2bf16" => "__builtin_amdgcn_mfma_f32_32x32x2bf16",
+    "llvm.amdgcn.mfma.f32.32x32x2f32" => "__builtin_amdgcn_mfma_f32_32x32x2f32",
+    "llvm.amdgcn.mfma.f32.32x32x4.xf32" => "__builtin_amdgcn_mfma_f32_32x32x4_xf32",
+    "llvm.amdgcn.mfma.f32.32x32x4bf16" => "__builtin_amdgcn_mfma_f32_32x32x4bf16",
+    "llvm.amdgcn.mfma.f32.32x32x4bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x4bf16_1k",
+    "llvm.amdgcn.mfma.f32.32x32x4f16" => "__builtin_amdgcn_mfma_f32_32x32x4f16",
+    "llvm.amdgcn.mfma.f32.32x32x8bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x8bf16_1k",
+    "llvm.amdgcn.mfma.f32.32x32x8f16" => "__builtin_amdgcn_mfma_f32_32x32x8f16",
+    "llvm.amdgcn.mfma.f32.4x4x1f32" => "__builtin_amdgcn_mfma_f32_4x4x1f32",
+    "llvm.amdgcn.mfma.f32.4x4x2bf16" => "__builtin_amdgcn_mfma_f32_4x4x2bf16",
+    "llvm.amdgcn.mfma.f32.4x4x4bf16.1k" => "__builtin_amdgcn_mfma_f32_4x4x4bf16_1k",
+    "llvm.amdgcn.mfma.f32.4x4x4f16" => "__builtin_amdgcn_mfma_f32_4x4x4f16",
+    "llvm.amdgcn.mfma.f64.16x16x4f64" => "__builtin_amdgcn_mfma_f64_16x16x4f64",
+    "llvm.amdgcn.mfma.f64.4x4x4f64" => "__builtin_amdgcn_mfma_f64_4x4x4f64",
+    "llvm.amdgcn.mfma.i32.16x16x16i8" => "__builtin_amdgcn_mfma_i32_16x16x16i8",
+    "llvm.amdgcn.mfma.i32.16x16x32.i8" => "__builtin_amdgcn_mfma_i32_16x16x32_i8",
+    "llvm.amdgcn.mfma.i32.16x16x4i8" => "__builtin_amdgcn_mfma_i32_16x16x4i8",
+    "llvm.amdgcn.mfma.i32.32x32x16.i8" => "__builtin_amdgcn_mfma_i32_32x32x16_i8",
+    "llvm.amdgcn.mfma.i32.32x32x4i8" => "__builtin_amdgcn_mfma_i32_32x32x4i8",
+    "llvm.amdgcn.mfma.i32.32x32x8i8" => "__builtin_amdgcn_mfma_i32_32x32x8i8",
+    "llvm.amdgcn.mfma.i32.4x4x4i8" => "__builtin_amdgcn_mfma_i32_4x4x4i8",
+    "llvm.amdgcn.mqsad.pk.u16.u8" => "__builtin_amdgcn_mqsad_pk_u16_u8",
+    "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8",
+    "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8",
+    "llvm.amdgcn.perm" => "__builtin_amdgcn_perm",
+    "llvm.amdgcn.permlane16.var" => "__builtin_amdgcn_permlane16_var",
+    "llvm.amdgcn.permlanex16.var" => "__builtin_amdgcn_permlanex16_var",
+    "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8",
+    "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr",
+    "llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy",
+    "llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy",
+    "llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier",
+    "llvm.amdgcn.s.barrier.init" => "__builtin_amdgcn_s_barrier_init",
+    "llvm.amdgcn.s.barrier.join" => "__builtin_amdgcn_s_barrier_join",
+    "llvm.amdgcn.s.barrier.leave" => "__builtin_amdgcn_s_barrier_leave",
+    "llvm.amdgcn.s.barrier.signal" => "__builtin_amdgcn_s_barrier_signal",
+    "llvm.amdgcn.s.barrier.signal.isfirst" => "__builtin_amdgcn_s_barrier_signal_isfirst",
+    "llvm.amdgcn.s.barrier.signal.isfirst.var" => "__builtin_amdgcn_s_barrier_signal_isfirst_var",
+    "llvm.amdgcn.s.barrier.signal.var" => "__builtin_amdgcn_s_barrier_signal_var",
+    "llvm.amdgcn.s.barrier.wait" => "__builtin_amdgcn_s_barrier_wait",
+    "llvm.amdgcn.s.dcache.inv" => "__builtin_amdgcn_s_dcache_inv",
+    "llvm.amdgcn.s.dcache.inv.vol" => "__builtin_amdgcn_s_dcache_inv_vol",
+    "llvm.amdgcn.s.dcache.wb" => "__builtin_amdgcn_s_dcache_wb",
+    "llvm.amdgcn.s.dcache.wb.vol" => "__builtin_amdgcn_s_dcache_wb_vol",
+    "llvm.amdgcn.s.decperflevel" => "__builtin_amdgcn_s_decperflevel",
+    "llvm.amdgcn.s.get.barrier.state" => "__builtin_amdgcn_s_get_barrier_state",
+    "llvm.amdgcn.s.get.waveid.in.workgroup" => "__builtin_amdgcn_s_get_waveid_in_workgroup",
+    "llvm.amdgcn.s.getpc" => "__builtin_amdgcn_s_getpc",
+    "llvm.amdgcn.s.getreg" => "__builtin_amdgcn_s_getreg",
+    "llvm.amdgcn.s.incperflevel" => "__builtin_amdgcn_s_incperflevel",
+    "llvm.amdgcn.s.memrealtime" => "__builtin_amdgcn_s_memrealtime",
+    "llvm.amdgcn.s.memtime" => "__builtin_amdgcn_s_memtime",
+    "llvm.amdgcn.s.sendmsg" => "__builtin_amdgcn_s_sendmsg",
+    "llvm.amdgcn.s.sendmsghalt" => "__builtin_amdgcn_s_sendmsghalt",
+    "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio",
+    "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg",
+    "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep",
+    "llvm.amdgcn.s.sleep.var" => "__builtin_amdgcn_s_sleep_var",
+    "llvm.amdgcn.s.ttracedata" => "__builtin_amdgcn_s_ttracedata",
+    "llvm.amdgcn.s.ttracedata.imm" => "__builtin_amdgcn_s_ttracedata_imm",
+    "llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready",
+    "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt",
+    "llvm.amdgcn.s.wakeup.barrier" => "__builtin_amdgcn_s_wakeup_barrier",
+    "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8",
+    "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16",
+    "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8",
+    "llvm.amdgcn.sched.barrier" => "__builtin_amdgcn_sched_barrier",
+    "llvm.amdgcn.sched.group.barrier" => "__builtin_amdgcn_sched_group_barrier",
+    "llvm.amdgcn.sdot2" => "__builtin_amdgcn_sdot2",
+    "llvm.amdgcn.sdot4" => "__builtin_amdgcn_sdot4",
+    "llvm.amdgcn.sdot8" => "__builtin_amdgcn_sdot8",
+    "llvm.amdgcn.smfmac.f32.16x16x32.bf16" => "__builtin_amdgcn_smfmac_f32_16x16x32_bf16",
+    "llvm.amdgcn.smfmac.f32.16x16x32.f16" => "__builtin_amdgcn_smfmac_f32_16x16x32_f16",
+    "llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_bf8",
+    "llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_fp8",
+    "llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_bf8",
+    "llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_fp8",
+    "llvm.amdgcn.smfmac.f32.32x32x16.bf16" => "__builtin_amdgcn_smfmac_f32_32x32x16_bf16",
+    "llvm.amdgcn.smfmac.f32.32x32x16.f16" => "__builtin_amdgcn_smfmac_f32_32x32x16_f16",
+    "llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_bf8",
+    "llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_fp8",
+    "llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_bf8",
+    "llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8",
+    "llvm.amdgcn.smfmac.i32.16x16x64.i8" => "__builtin_amdgcn_smfmac_i32_16x16x64_i8",
+    "llvm.amdgcn.smfmac.i32.32x32x32.i8" => "__builtin_amdgcn_smfmac_i32_32x32x32_i8",
+    "llvm.amdgcn.sudot4" => "__builtin_amdgcn_sudot4",
+    "llvm.amdgcn.sudot8" => "__builtin_amdgcn_sudot8",
+    "llvm.amdgcn.udot2" => "__builtin_amdgcn_udot2",
+    "llvm.amdgcn.udot4" => "__builtin_amdgcn_udot4",
+    "llvm.amdgcn.udot8" => "__builtin_amdgcn_udot8",
+    "llvm.amdgcn.wave.barrier" => "__builtin_amdgcn_wave_barrier",
+    "llvm.amdgcn.wavefrontsize" => "__builtin_amdgcn_wavefrontsize",
+    "llvm.amdgcn.workgroup.id.x" => "__builtin_amdgcn_workgroup_id_x",
+    "llvm.amdgcn.workgroup.id.y" => "__builtin_amdgcn_workgroup_id_y",
+    "llvm.amdgcn.workgroup.id.z" => "__builtin_amdgcn_workgroup_id_z",
+    // arm
+    "llvm.arm.cdp" => "__builtin_arm_cdp",
+    "llvm.arm.cdp2" => "__builtin_arm_cdp2",
+    "llvm.arm.cmse.tt" => "__builtin_arm_cmse_TT",
+    "llvm.arm.cmse.tta" => "__builtin_arm_cmse_TTA",
+    "llvm.arm.cmse.ttat" => "__builtin_arm_cmse_TTAT",
+    "llvm.arm.cmse.ttt" => "__builtin_arm_cmse_TTT",
+    "llvm.arm.dmb" => "__builtin_arm_dmb",
+    "llvm.arm.dsb" => "__builtin_arm_dsb",
+    "llvm.arm.get.fpscr" => "__builtin_arm_get_fpscr",
+    "llvm.arm.isb" => "__builtin_arm_isb",
+    "llvm.arm.ldc" => "__builtin_arm_ldc",
+    "llvm.arm.ldc2" => "__builtin_arm_ldc2",
+    "llvm.arm.ldc2l" => "__builtin_arm_ldc2l",
+    "llvm.arm.ldcl" => "__builtin_arm_ldcl",
+    "llvm.arm.mcr" => "__builtin_arm_mcr",
+    "llvm.arm.mcr2" => "__builtin_arm_mcr2",
+    "llvm.arm.mcrr" => "__builtin_arm_mcrr",
+    "llvm.arm.mcrr2" => "__builtin_arm_mcrr2",
+    "llvm.arm.mrc" => "__builtin_arm_mrc",
+    "llvm.arm.mrc2" => "__builtin_arm_mrc2",
+    "llvm.arm.qadd" => "__builtin_arm_qadd",
+    "llvm.arm.qadd16" => "__builtin_arm_qadd16",
+    "llvm.arm.qadd8" => "__builtin_arm_qadd8",
+    "llvm.arm.qasx" => "__builtin_arm_qasx",
+    "llvm.arm.qsax" => "__builtin_arm_qsax",
+    "llvm.arm.qsub" => "__builtin_arm_qsub",
+    "llvm.arm.qsub16" => "__builtin_arm_qsub16",
+    "llvm.arm.qsub8" => "__builtin_arm_qsub8",
+    "llvm.arm.sadd16" => "__builtin_arm_sadd16",
+    "llvm.arm.sadd8" => "__builtin_arm_sadd8",
+    "llvm.arm.sasx" => "__builtin_arm_sasx",
+    "llvm.arm.sel" => "__builtin_arm_sel",
+    "llvm.arm.set.fpscr" => "__builtin_arm_set_fpscr",
+    "llvm.arm.shadd16" => "__builtin_arm_shadd16",
+    "llvm.arm.shadd8" => "__builtin_arm_shadd8",
+    "llvm.arm.shasx" => "__builtin_arm_shasx",
+    "llvm.arm.shsax" => "__builtin_arm_shsax",
+    "llvm.arm.shsub16" => "__builtin_arm_shsub16",
+    "llvm.arm.shsub8" => "__builtin_arm_shsub8",
+    "llvm.arm.smlabb" => "__builtin_arm_smlabb",
+    "llvm.arm.smlabt" => "__builtin_arm_smlabt",
+    "llvm.arm.smlad" => "__builtin_arm_smlad",
+    "llvm.arm.smladx" => "__builtin_arm_smladx",
+    "llvm.arm.smlald" => "__builtin_arm_smlald",
+    "llvm.arm.smlaldx" => "__builtin_arm_smlaldx",
+    "llvm.arm.smlatb" => "__builtin_arm_smlatb",
+    "llvm.arm.smlatt" => "__builtin_arm_smlatt",
+    "llvm.arm.smlawb" => "__builtin_arm_smlawb",
+    "llvm.arm.smlawt" => "__builtin_arm_smlawt",
+    "llvm.arm.smlsd" => "__builtin_arm_smlsd",
+    "llvm.arm.smlsdx" => "__builtin_arm_smlsdx",
+    "llvm.arm.smlsld" => "__builtin_arm_smlsld",
+    "llvm.arm.smlsldx" => "__builtin_arm_smlsldx",
+    "llvm.arm.smuad" => "__builtin_arm_smuad",
+    "llvm.arm.smuadx" => "__builtin_arm_smuadx",
+    "llvm.arm.smulbb" => "__builtin_arm_smulbb",
+    "llvm.arm.smulbt" => "__builtin_arm_smulbt",
+    "llvm.arm.smultb" => "__builtin_arm_smultb",
+    "llvm.arm.smultt" => "__builtin_arm_smultt",
+    "llvm.arm.smulwb" => "__builtin_arm_smulwb",
+    "llvm.arm.smulwt" => "__builtin_arm_smulwt",
+    "llvm.arm.smusd" => "__builtin_arm_smusd",
+    "llvm.arm.smusdx" => "__builtin_arm_smusdx",
+    "llvm.arm.ssat" => "__builtin_arm_ssat",
+    "llvm.arm.ssat16" => "__builtin_arm_ssat16",
+    "llvm.arm.ssax" => "__builtin_arm_ssax",
+    "llvm.arm.ssub16" => "__builtin_arm_ssub16",
+    "llvm.arm.ssub8" => "__builtin_arm_ssub8",
+    "llvm.arm.stc" => "__builtin_arm_stc",
+    "llvm.arm.stc2" => "__builtin_arm_stc2",
+    "llvm.arm.stc2l" => "__builtin_arm_stc2l",
+    "llvm.arm.stcl" => "__builtin_arm_stcl",
+    "llvm.arm.sxtab16" => "__builtin_arm_sxtab16",
+    "llvm.arm.sxtb16" => "__builtin_arm_sxtb16",
+    "llvm.arm.thread.pointer" => "__builtin_thread_pointer",
+    "llvm.arm.uadd16" => "__builtin_arm_uadd16",
+    "llvm.arm.uadd8" => "__builtin_arm_uadd8",
+    "llvm.arm.uasx" => "__builtin_arm_uasx",
+    "llvm.arm.uhadd16" => "__builtin_arm_uhadd16",
+    "llvm.arm.uhadd8" => "__builtin_arm_uhadd8",
+    "llvm.arm.uhasx" => "__builtin_arm_uhasx",
+    "llvm.arm.uhsax" => "__builtin_arm_uhsax",
+    "llvm.arm.uhsub16" => "__builtin_arm_uhsub16",
+    "llvm.arm.uhsub8" => "__builtin_arm_uhsub8",
+    "llvm.arm.uqadd16" => "__builtin_arm_uqadd16",
+    "llvm.arm.uqadd8" => "__builtin_arm_uqadd8",
+    "llvm.arm.uqasx" => "__builtin_arm_uqasx",
+    "llvm.arm.uqsax" => "__builtin_arm_uqsax",
+    "llvm.arm.uqsub16" => "__builtin_arm_uqsub16",
+    "llvm.arm.uqsub8" => "__builtin_arm_uqsub8",
+    "llvm.arm.usad8" => "__builtin_arm_usad8",
+    "llvm.arm.usada8" => "__builtin_arm_usada8",
+    "llvm.arm.usat" => "__builtin_arm_usat",
+    "llvm.arm.usat16" => "__builtin_arm_usat16",
+    "llvm.arm.usax" => "__builtin_arm_usax",
+    "llvm.arm.usub16" => "__builtin_arm_usub16",
+    "llvm.arm.usub8" => "__builtin_arm_usub8",
+    "llvm.arm.uxtab16" => "__builtin_arm_uxtab16",
+    "llvm.arm.uxtb16" => "__builtin_arm_uxtb16",
+    // bpf
+    "llvm.bpf.btf.type.id" => "__builtin_bpf_btf_type_id",
+    "llvm.bpf.compare" => "__builtin_bpf_compare",
+    "llvm.bpf.getelementptr.and.load" => "__builtin_bpf_getelementptr_and_load",
+    "llvm.bpf.getelementptr.and.store" => "__builtin_bpf_getelementptr_and_store",
+    "llvm.bpf.load.byte" => "__builtin_bpf_load_byte",
+    "llvm.bpf.load.half" => "__builtin_bpf_load_half",
+    "llvm.bpf.load.word" => "__builtin_bpf_load_word",
+    "llvm.bpf.passthrough" => "__builtin_bpf_passthrough",
+    "llvm.bpf.preserve.enum.value" => "__builtin_bpf_preserve_enum_value",
+    "llvm.bpf.preserve.field.info" => "__builtin_bpf_preserve_field_info",
+    "llvm.bpf.preserve.type.info" => "__builtin_bpf_preserve_type_info",
+    "llvm.bpf.pseudo" => "__builtin_bpf_pseudo",
+    // cuda
+    "llvm.cuda.syncthreads" => "__syncthreads",
+    // dx
+    "llvm.dx.create.handle" => "__builtin_hlsl_create_handle",
+    // hexagon
+    "llvm.hexagon.A2.abs" => "__builtin_HEXAGON_A2_abs",
+    "llvm.hexagon.A2.absp" => "__builtin_HEXAGON_A2_absp",
+    "llvm.hexagon.A2.abssat" => "__builtin_HEXAGON_A2_abssat",
+    "llvm.hexagon.A2.add" => "__builtin_HEXAGON_A2_add",
+    "llvm.hexagon.A2.addh.h16.hh" => "__builtin_HEXAGON_A2_addh_h16_hh",
+    "llvm.hexagon.A2.addh.h16.hl" => "__builtin_HEXAGON_A2_addh_h16_hl",
+    "llvm.hexagon.A2.addh.h16.lh" => "__builtin_HEXAGON_A2_addh_h16_lh",
+    "llvm.hexagon.A2.addh.h16.ll" => "__builtin_HEXAGON_A2_addh_h16_ll",
+    "llvm.hexagon.A2.addh.h16.sat.hh" => "__builtin_HEXAGON_A2_addh_h16_sat_hh",
+    "llvm.hexagon.A2.addh.h16.sat.hl" => "__builtin_HEXAGON_A2_addh_h16_sat_hl",
+    "llvm.hexagon.A2.addh.h16.sat.lh" => "__builtin_HEXAGON_A2_addh_h16_sat_lh",
+    "llvm.hexagon.A2.addh.h16.sat.ll" => "__builtin_HEXAGON_A2_addh_h16_sat_ll",
+    "llvm.hexagon.A2.addh.l16.hl" => "__builtin_HEXAGON_A2_addh_l16_hl",
+    "llvm.hexagon.A2.addh.l16.ll" => "__builtin_HEXAGON_A2_addh_l16_ll",
+    "llvm.hexagon.A2.addh.l16.sat.hl" => "__builtin_HEXAGON_A2_addh_l16_sat_hl",
+    "llvm.hexagon.A2.addh.l16.sat.ll" => "__builtin_HEXAGON_A2_addh_l16_sat_ll",
+    "llvm.hexagon.A2.addi" => "__builtin_HEXAGON_A2_addi",
+    "llvm.hexagon.A2.addp" => "__builtin_HEXAGON_A2_addp",
+    "llvm.hexagon.A2.addpsat" => "__builtin_HEXAGON_A2_addpsat",
+    "llvm.hexagon.A2.addsat" => "__builtin_HEXAGON_A2_addsat",
+    "llvm.hexagon.A2.addsp" => "__builtin_HEXAGON_A2_addsp",
+    "llvm.hexagon.A2.and" => "__builtin_HEXAGON_A2_and",
+    "llvm.hexagon.A2.andir" => "__builtin_HEXAGON_A2_andir",
+    "llvm.hexagon.A2.andp" => "__builtin_HEXAGON_A2_andp",
+    "llvm.hexagon.A2.aslh" => "__builtin_HEXAGON_A2_aslh",
+    "llvm.hexagon.A2.asrh" => "__builtin_HEXAGON_A2_asrh",
+    "llvm.hexagon.A2.combine.hh" => "__builtin_HEXAGON_A2_combine_hh",
+    "llvm.hexagon.A2.combine.hl" => "__builtin_HEXAGON_A2_combine_hl",
+    "llvm.hexagon.A2.combine.lh" => "__builtin_HEXAGON_A2_combine_lh",
+    "llvm.hexagon.A2.combine.ll" => "__builtin_HEXAGON_A2_combine_ll",
+    "llvm.hexagon.A2.combineii" => "__builtin_HEXAGON_A2_combineii",
+    "llvm.hexagon.A2.combinew" => "__builtin_HEXAGON_A2_combinew",
+    "llvm.hexagon.A2.max" => "__builtin_HEXAGON_A2_max",
+    "llvm.hexagon.A2.maxp" => "__builtin_HEXAGON_A2_maxp",
+    "llvm.hexagon.A2.maxu" => "__builtin_HEXAGON_A2_maxu",
+    "llvm.hexagon.A2.maxup" => "__builtin_HEXAGON_A2_maxup",
+    "llvm.hexagon.A2.min" => "__builtin_HEXAGON_A2_min",
+    "llvm.hexagon.A2.minp" => "__builtin_HEXAGON_A2_minp",
+    "llvm.hexagon.A2.minu" => "__builtin_HEXAGON_A2_minu",
+    "llvm.hexagon.A2.minup" => "__builtin_HEXAGON_A2_minup",
+    "llvm.hexagon.A2.neg" => "__builtin_HEXAGON_A2_neg",
+    "llvm.hexagon.A2.negp" => "__builtin_HEXAGON_A2_negp",
+    "llvm.hexagon.A2.negsat" => "__builtin_HEXAGON_A2_negsat",
+    "llvm.hexagon.A2.not" => "__builtin_HEXAGON_A2_not",
+    "llvm.hexagon.A2.notp" => "__builtin_HEXAGON_A2_notp",
+    "llvm.hexagon.A2.or" => "__builtin_HEXAGON_A2_or",
+    "llvm.hexagon.A2.orir" => "__builtin_HEXAGON_A2_orir",
+    "llvm.hexagon.A2.orp" => "__builtin_HEXAGON_A2_orp",
+    "llvm.hexagon.A2.roundsat" => "__builtin_HEXAGON_A2_roundsat",
+    "llvm.hexagon.A2.sat" => "__builtin_HEXAGON_A2_sat",
+    "llvm.hexagon.A2.satb" => "__builtin_HEXAGON_A2_satb",
+    "llvm.hexagon.A2.sath" => "__builtin_HEXAGON_A2_sath",
+    "llvm.hexagon.A2.satub" => "__builtin_HEXAGON_A2_satub",
+    "llvm.hexagon.A2.satuh" => "__builtin_HEXAGON_A2_satuh",
+    "llvm.hexagon.A2.sub" => "__builtin_HEXAGON_A2_sub",
+    "llvm.hexagon.A2.subh.h16.hh" => "__builtin_HEXAGON_A2_subh_h16_hh",
+    "llvm.hexagon.A2.subh.h16.hl" => "__builtin_HEXAGON_A2_subh_h16_hl",
+    "llvm.hexagon.A2.subh.h16.lh" => "__builtin_HEXAGON_A2_subh_h16_lh",
+    "llvm.hexagon.A2.subh.h16.ll" => "__builtin_HEXAGON_A2_subh_h16_ll",
+    "llvm.hexagon.A2.subh.h16.sat.hh" => "__builtin_HEXAGON_A2_subh_h16_sat_hh",
+    "llvm.hexagon.A2.subh.h16.sat.hl" => "__builtin_HEXAGON_A2_subh_h16_sat_hl",
+    "llvm.hexagon.A2.subh.h16.sat.lh" => "__builtin_HEXAGON_A2_subh_h16_sat_lh",
+    "llvm.hexagon.A2.subh.h16.sat.ll" => "__builtin_HEXAGON_A2_subh_h16_sat_ll",
+    "llvm.hexagon.A2.subh.l16.hl" => "__builtin_HEXAGON_A2_subh_l16_hl",
+    "llvm.hexagon.A2.subh.l16.ll" => "__builtin_HEXAGON_A2_subh_l16_ll",
+    "llvm.hexagon.A2.subh.l16.sat.hl" => "__builtin_HEXAGON_A2_subh_l16_sat_hl",
+    "llvm.hexagon.A2.subh.l16.sat.ll" => "__builtin_HEXAGON_A2_subh_l16_sat_ll",
+    "llvm.hexagon.A2.subp" => "__builtin_HEXAGON_A2_subp",
+    "llvm.hexagon.A2.subri" => "__builtin_HEXAGON_A2_subri",
+    "llvm.hexagon.A2.subsat" => "__builtin_HEXAGON_A2_subsat",
+    "llvm.hexagon.A2.svaddh" => "__builtin_HEXAGON_A2_svaddh",
+    "llvm.hexagon.A2.svaddhs" => "__builtin_HEXAGON_A2_svaddhs",
+    "llvm.hexagon.A2.svadduhs" => "__builtin_HEXAGON_A2_svadduhs",
+    "llvm.hexagon.A2.svavgh" => "__builtin_HEXAGON_A2_svavgh",
+    "llvm.hexagon.A2.svavghs" => "__builtin_HEXAGON_A2_svavghs",
+    "llvm.hexagon.A2.svnavgh" => "__builtin_HEXAGON_A2_svnavgh",
+    "llvm.hexagon.A2.svsubh" => "__builtin_HEXAGON_A2_svsubh",
+    "llvm.hexagon.A2.svsubhs" => "__builtin_HEXAGON_A2_svsubhs",
+    "llvm.hexagon.A2.svsubuhs" => "__builtin_HEXAGON_A2_svsubuhs",
+    "llvm.hexagon.A2.swiz" => "__builtin_HEXAGON_A2_swiz",
+    "llvm.hexagon.A2.sxtb" => "__builtin_HEXAGON_A2_sxtb",
+    "llvm.hexagon.A2.sxth" => "__builtin_HEXAGON_A2_sxth",
+    "llvm.hexagon.A2.sxtw" => "__builtin_HEXAGON_A2_sxtw",
+    "llvm.hexagon.A2.tfr" => "__builtin_HEXAGON_A2_tfr",
+    "llvm.hexagon.A2.tfrih" => "__builtin_HEXAGON_A2_tfrih",
+    "llvm.hexagon.A2.tfril" => "__builtin_HEXAGON_A2_tfril",
+    "llvm.hexagon.A2.tfrp" => "__builtin_HEXAGON_A2_tfrp",
+    "llvm.hexagon.A2.tfrpi" => "__builtin_HEXAGON_A2_tfrpi",
+    "llvm.hexagon.A2.tfrsi" => "__builtin_HEXAGON_A2_tfrsi",
+    "llvm.hexagon.A2.vabsh" => "__builtin_HEXAGON_A2_vabsh",
+    "llvm.hexagon.A2.vabshsat" => "__builtin_HEXAGON_A2_vabshsat",
+    "llvm.hexagon.A2.vabsw" => "__builtin_HEXAGON_A2_vabsw",
+    "llvm.hexagon.A2.vabswsat" => "__builtin_HEXAGON_A2_vabswsat",
+    "llvm.hexagon.A2.vaddb.map" => "__builtin_HEXAGON_A2_vaddb_map",
+    "llvm.hexagon.A2.vaddh" => "__builtin_HEXAGON_A2_vaddh",
+    "llvm.hexagon.A2.vaddhs" => "__builtin_HEXAGON_A2_vaddhs",
+    "llvm.hexagon.A2.vaddub" => "__builtin_HEXAGON_A2_vaddub",
+    "llvm.hexagon.A2.vaddubs" => "__builtin_HEXAGON_A2_vaddubs",
+    "llvm.hexagon.A2.vadduhs" => "__builtin_HEXAGON_A2_vadduhs",
+    "llvm.hexagon.A2.vaddw" => "__builtin_HEXAGON_A2_vaddw",
+    "llvm.hexagon.A2.vaddws" => "__builtin_HEXAGON_A2_vaddws",
+    "llvm.hexagon.A2.vavgh" => "__builtin_HEXAGON_A2_vavgh",
+    "llvm.hexagon.A2.vavghcr" => "__builtin_HEXAGON_A2_vavghcr",
+    "llvm.hexagon.A2.vavghr" => "__builtin_HEXAGON_A2_vavghr",
+    "llvm.hexagon.A2.vavgub" => "__builtin_HEXAGON_A2_vavgub",
+    "llvm.hexagon.A2.vavgubr" => "__builtin_HEXAGON_A2_vavgubr",
+    "llvm.hexagon.A2.vavguh" => "__builtin_HEXAGON_A2_vavguh",
+    "llvm.hexagon.A2.vavguhr" => "__builtin_HEXAGON_A2_vavguhr",
+    "llvm.hexagon.A2.vavguw" => "__builtin_HEXAGON_A2_vavguw",
+    "llvm.hexagon.A2.vavguwr" => "__builtin_HEXAGON_A2_vavguwr",
+    "llvm.hexagon.A2.vavgw" => "__builtin_HEXAGON_A2_vavgw",
+    "llvm.hexagon.A2.vavgwcr" => "__builtin_HEXAGON_A2_vavgwcr",
+    "llvm.hexagon.A2.vavgwr" => "__builtin_HEXAGON_A2_vavgwr",
+    "llvm.hexagon.A2.vcmpbeq" => "__builtin_HEXAGON_A2_vcmpbeq",
+    "llvm.hexagon.A2.vcmpbgtu" => "__builtin_HEXAGON_A2_vcmpbgtu",
+    "llvm.hexagon.A2.vcmpheq" => "__builtin_HEXAGON_A2_vcmpheq",
+    "llvm.hexagon.A2.vcmphgt" => "__builtin_HEXAGON_A2_vcmphgt",
+    "llvm.hexagon.A2.vcmphgtu" => "__builtin_HEXAGON_A2_vcmphgtu",
+    "llvm.hexagon.A2.vcmpweq" => "__builtin_HEXAGON_A2_vcmpweq",
+    "llvm.hexagon.A2.vcmpwgt" => "__builtin_HEXAGON_A2_vcmpwgt",
+    "llvm.hexagon.A2.vcmpwgtu" => "__builtin_HEXAGON_A2_vcmpwgtu",
+    "llvm.hexagon.A2.vconj" => "__builtin_HEXAGON_A2_vconj",
+    "llvm.hexagon.A2.vmaxb" => "__builtin_HEXAGON_A2_vmaxb",
+    "llvm.hexagon.A2.vmaxh" => "__builtin_HEXAGON_A2_vmaxh",
+    "llvm.hexagon.A2.vmaxub" => "__builtin_HEXAGON_A2_vmaxub",
+    "llvm.hexagon.A2.vmaxuh" => "__builtin_HEXAGON_A2_vmaxuh",
+    "llvm.hexagon.A2.vmaxuw" => "__builtin_HEXAGON_A2_vmaxuw",
+    "llvm.hexagon.A2.vmaxw" => "__builtin_HEXAGON_A2_vmaxw",
+    "llvm.hexagon.A2.vminb" => "__builtin_HEXAGON_A2_vminb",
+    "llvm.hexagon.A2.vminh" => "__builtin_HEXAGON_A2_vminh",
+    "llvm.hexagon.A2.vminub" => "__builtin_HEXAGON_A2_vminub",
+    "llvm.hexagon.A2.vminuh" => "__builtin_HEXAGON_A2_vminuh",
+    "llvm.hexagon.A2.vminuw" => "__builtin_HEXAGON_A2_vminuw",
+    "llvm.hexagon.A2.vminw" => "__builtin_HEXAGON_A2_vminw",
+    "llvm.hexagon.A2.vnavgh" => "__builtin_HEXAGON_A2_vnavgh",
+    "llvm.hexagon.A2.vnavghcr" => "__builtin_HEXAGON_A2_vnavghcr",
+    "llvm.hexagon.A2.vnavghr" => "__builtin_HEXAGON_A2_vnavghr",
+    "llvm.hexagon.A2.vnavgw" => "__builtin_HEXAGON_A2_vnavgw",
+    "llvm.hexagon.A2.vnavgwcr" => "__builtin_HEXAGON_A2_vnavgwcr",
+    "llvm.hexagon.A2.vnavgwr" => "__builtin_HEXAGON_A2_vnavgwr",
+    "llvm.hexagon.A2.vraddub" => "__builtin_HEXAGON_A2_vraddub",
+    "llvm.hexagon.A2.vraddub.acc" => "__builtin_HEXAGON_A2_vraddub_acc",
+    "llvm.hexagon.A2.vrsadub" => "__builtin_HEXAGON_A2_vrsadub",
+    "llvm.hexagon.A2.vrsadub.acc" => "__builtin_HEXAGON_A2_vrsadub_acc",
+    "llvm.hexagon.A2.vsubb.map" => "__builtin_HEXAGON_A2_vsubb_map",
+    "llvm.hexagon.A2.vsubh" => "__builtin_HEXAGON_A2_vsubh",
+    "llvm.hexagon.A2.vsubhs" => "__builtin_HEXAGON_A2_vsubhs",
+    "llvm.hexagon.A2.vsubub" => "__builtin_HEXAGON_A2_vsubub",
+    "llvm.hexagon.A2.vsububs" => "__builtin_HEXAGON_A2_vsububs",
+    "llvm.hexagon.A2.vsubuhs" => "__builtin_HEXAGON_A2_vsubuhs",
+    "llvm.hexagon.A2.vsubw" => "__builtin_HEXAGON_A2_vsubw",
+    "llvm.hexagon.A2.vsubws" => "__builtin_HEXAGON_A2_vsubws",
+    "llvm.hexagon.A2.xor" => "__builtin_HEXAGON_A2_xor",
+    "llvm.hexagon.A2.xorp" => "__builtin_HEXAGON_A2_xorp",
+    "llvm.hexagon.A2.zxtb" => "__builtin_HEXAGON_A2_zxtb",
+    "llvm.hexagon.A2.zxth" => "__builtin_HEXAGON_A2_zxth",
+    "llvm.hexagon.A4.andn" => "__builtin_HEXAGON_A4_andn",
+    "llvm.hexagon.A4.andnp" => "__builtin_HEXAGON_A4_andnp",
+    "llvm.hexagon.A4.bitsplit" => "__builtin_HEXAGON_A4_bitsplit",
+    "llvm.hexagon.A4.bitspliti" => "__builtin_HEXAGON_A4_bitspliti",
+    "llvm.hexagon.A4.boundscheck" => "__builtin_HEXAGON_A4_boundscheck",
+    "llvm.hexagon.A4.cmpbeq" => "__builtin_HEXAGON_A4_cmpbeq",
+    "llvm.hexagon.A4.cmpbeqi" => "__builtin_HEXAGON_A4_cmpbeqi",
+    "llvm.hexagon.A4.cmpbgt" => "__builtin_HEXAGON_A4_cmpbgt",
+    "llvm.hexagon.A4.cmpbgti" => "__builtin_HEXAGON_A4_cmpbgti",
+    "llvm.hexagon.A4.cmpbgtu" => "__builtin_HEXAGON_A4_cmpbgtu",
+    "llvm.hexagon.A4.cmpbgtui" => "__builtin_HEXAGON_A4_cmpbgtui",
+    "llvm.hexagon.A4.cmpheq" => "__builtin_HEXAGON_A4_cmpheq",
+    "llvm.hexagon.A4.cmpheqi" => "__builtin_HEXAGON_A4_cmpheqi",
+    "llvm.hexagon.A4.cmphgt" => "__builtin_HEXAGON_A4_cmphgt",
+    "llvm.hexagon.A4.cmphgti" => "__builtin_HEXAGON_A4_cmphgti",
+    "llvm.hexagon.A4.cmphgtu" => "__builtin_HEXAGON_A4_cmphgtu",
+    "llvm.hexagon.A4.cmphgtui" => "__builtin_HEXAGON_A4_cmphgtui",
+    "llvm.hexagon.A4.combineir" => "__builtin_HEXAGON_A4_combineir",
+    "llvm.hexagon.A4.combineri" => "__builtin_HEXAGON_A4_combineri",
+    "llvm.hexagon.A4.cround.ri" => "__builtin_HEXAGON_A4_cround_ri",
+    "llvm.hexagon.A4.cround.rr" => "__builtin_HEXAGON_A4_cround_rr",
+    "llvm.hexagon.A4.modwrapu" => "__builtin_HEXAGON_A4_modwrapu",
+    "llvm.hexagon.A4.orn" => "__builtin_HEXAGON_A4_orn",
+    "llvm.hexagon.A4.ornp" => "__builtin_HEXAGON_A4_ornp",
+    "llvm.hexagon.A4.rcmpeq" => "__builtin_HEXAGON_A4_rcmpeq",
+    "llvm.hexagon.A4.rcmpeqi" => "__builtin_HEXAGON_A4_rcmpeqi",
+    "llvm.hexagon.A4.rcmpneq" => "__builtin_HEXAGON_A4_rcmpneq",
+    "llvm.hexagon.A4.rcmpneqi" => "__builtin_HEXAGON_A4_rcmpneqi",
+    "llvm.hexagon.A4.round.ri" => "__builtin_HEXAGON_A4_round_ri",
+    "llvm.hexagon.A4.round.ri.sat" => "__builtin_HEXAGON_A4_round_ri_sat",
+    "llvm.hexagon.A4.round.rr" => "__builtin_HEXAGON_A4_round_rr",
+    "llvm.hexagon.A4.round.rr.sat" => "__builtin_HEXAGON_A4_round_rr_sat",
+    "llvm.hexagon.A4.tlbmatch" => "__builtin_HEXAGON_A4_tlbmatch",
+    "llvm.hexagon.A4.vcmpbeq.any" => "__builtin_HEXAGON_A4_vcmpbeq_any",
+    "llvm.hexagon.A4.vcmpbeqi" => "__builtin_HEXAGON_A4_vcmpbeqi",
+    "llvm.hexagon.A4.vcmpbgt" => "__builtin_HEXAGON_A4_vcmpbgt",
+    "llvm.hexagon.A4.vcmpbgti" => "__builtin_HEXAGON_A4_vcmpbgti",
+    "llvm.hexagon.A4.vcmpbgtui" => "__builtin_HEXAGON_A4_vcmpbgtui",
+    "llvm.hexagon.A4.vcmpheqi" => "__builtin_HEXAGON_A4_vcmpheqi",
+    "llvm.hexagon.A4.vcmphgti" => "__builtin_HEXAGON_A4_vcmphgti",
+    "llvm.hexagon.A4.vcmphgtui" => "__builtin_HEXAGON_A4_vcmphgtui",
+    "llvm.hexagon.A4.vcmpweqi" => "__builtin_HEXAGON_A4_vcmpweqi",
+    "llvm.hexagon.A4.vcmpwgti" => "__builtin_HEXAGON_A4_vcmpwgti",
+    "llvm.hexagon.A4.vcmpwgtui" => "__builtin_HEXAGON_A4_vcmpwgtui",
+    "llvm.hexagon.A4.vrmaxh" => "__builtin_HEXAGON_A4_vrmaxh",
+    "llvm.hexagon.A4.vrmaxuh" => "__builtin_HEXAGON_A4_vrmaxuh",
+    "llvm.hexagon.A4.vrmaxuw" => "__builtin_HEXAGON_A4_vrmaxuw",
+    "llvm.hexagon.A4.vrmaxw" => "__builtin_HEXAGON_A4_vrmaxw",
+    "llvm.hexagon.A4.vrminh" => "__builtin_HEXAGON_A4_vrminh",
+    "llvm.hexagon.A4.vrminuh" => "__builtin_HEXAGON_A4_vrminuh",
+    "llvm.hexagon.A4.vrminuw" => "__builtin_HEXAGON_A4_vrminuw",
+    "llvm.hexagon.A4.vrminw" => "__builtin_HEXAGON_A4_vrminw",
+    "llvm.hexagon.A5.vaddhubs" => "__builtin_HEXAGON_A5_vaddhubs",
+    "llvm.hexagon.A6.vcmpbeq.notany" => "__builtin_HEXAGON_A6_vcmpbeq_notany",
+    "llvm.hexagon.A7.clip" => "__builtin_HEXAGON_A7_clip",
+    "llvm.hexagon.A7.croundd.ri" => "__builtin_HEXAGON_A7_croundd_ri",
+    "llvm.hexagon.A7.croundd.rr" => "__builtin_HEXAGON_A7_croundd_rr",
+    "llvm.hexagon.A7.vclip" => "__builtin_HEXAGON_A7_vclip",
+    "llvm.hexagon.C2.all8" => "__builtin_HEXAGON_C2_all8",
+    "llvm.hexagon.C2.and" => "__builtin_HEXAGON_C2_and",
+    "llvm.hexagon.C2.andn" => "__builtin_HEXAGON_C2_andn",
+    "llvm.hexagon.C2.any8" => "__builtin_HEXAGON_C2_any8",
+    "llvm.hexagon.C2.bitsclr" => "__builtin_HEXAGON_C2_bitsclr",
+    "llvm.hexagon.C2.bitsclri" => "__builtin_HEXAGON_C2_bitsclri",
+    "llvm.hexagon.C2.bitsset" => "__builtin_HEXAGON_C2_bitsset",
+    "llvm.hexagon.C2.cmpeq" => "__builtin_HEXAGON_C2_cmpeq",
+    "llvm.hexagon.C2.cmpeqi" => "__builtin_HEXAGON_C2_cmpeqi",
+    "llvm.hexagon.C2.cmpeqp" => "__builtin_HEXAGON_C2_cmpeqp",
+    "llvm.hexagon.C2.cmpgei" => "__builtin_HEXAGON_C2_cmpgei",
+    "llvm.hexagon.C2.cmpgeui" => "__builtin_HEXAGON_C2_cmpgeui",
+    "llvm.hexagon.C2.cmpgt" => "__builtin_HEXAGON_C2_cmpgt",
+    "llvm.hexagon.C2.cmpgti" => "__builtin_HEXAGON_C2_cmpgti",
+    "llvm.hexagon.C2.cmpgtp" => "__builtin_HEXAGON_C2_cmpgtp",
+    "llvm.hexagon.C2.cmpgtu" => "__builtin_HEXAGON_C2_cmpgtu",
+    "llvm.hexagon.C2.cmpgtui" => "__builtin_HEXAGON_C2_cmpgtui",
+    "llvm.hexagon.C2.cmpgtup" => "__builtin_HEXAGON_C2_cmpgtup",
+    "llvm.hexagon.C2.cmplt" => "__builtin_HEXAGON_C2_cmplt",
+    "llvm.hexagon.C2.cmpltu" => "__builtin_HEXAGON_C2_cmpltu",
+    "llvm.hexagon.C2.mask" => "__builtin_HEXAGON_C2_mask",
+    "llvm.hexagon.C2.mux" => "__builtin_HEXAGON_C2_mux",
+    "llvm.hexagon.C2.muxii" => "__builtin_HEXAGON_C2_muxii",
+    "llvm.hexagon.C2.muxir" => "__builtin_HEXAGON_C2_muxir",
+    "llvm.hexagon.C2.muxri" => "__builtin_HEXAGON_C2_muxri",
+    "llvm.hexagon.C2.not" => "__builtin_HEXAGON_C2_not",
+    "llvm.hexagon.C2.or" => "__builtin_HEXAGON_C2_or",
+    "llvm.hexagon.C2.orn" => "__builtin_HEXAGON_C2_orn",
+    "llvm.hexagon.C2.pxfer.map" => "__builtin_HEXAGON_C2_pxfer_map",
+    "llvm.hexagon.C2.tfrpr" => "__builtin_HEXAGON_C2_tfrpr",
+    "llvm.hexagon.C2.tfrrp" => "__builtin_HEXAGON_C2_tfrrp",
+    "llvm.hexagon.C2.vitpack" => "__builtin_HEXAGON_C2_vitpack",
+    "llvm.hexagon.C2.vmux" => "__builtin_HEXAGON_C2_vmux",
+    "llvm.hexagon.C2.xor" => "__builtin_HEXAGON_C2_xor",
+    "llvm.hexagon.C4.and.and" => "__builtin_HEXAGON_C4_and_and",
+    "llvm.hexagon.C4.and.andn" => "__builtin_HEXAGON_C4_and_andn",
+    "llvm.hexagon.C4.and.or" => "__builtin_HEXAGON_C4_and_or",
+    "llvm.hexagon.C4.and.orn" => "__builtin_HEXAGON_C4_and_orn",
+    "llvm.hexagon.C4.cmplte" => "__builtin_HEXAGON_C4_cmplte",
+    "llvm.hexagon.C4.cmpltei" => "__builtin_HEXAGON_C4_cmpltei",
+    "llvm.hexagon.C4.cmplteu" => "__builtin_HEXAGON_C4_cmplteu",
+    "llvm.hexagon.C4.cmplteui" => "__builtin_HEXAGON_C4_cmplteui",
+    "llvm.hexagon.C4.cmpneq" => "__builtin_HEXAGON_C4_cmpneq",
+    "llvm.hexagon.C4.cmpneqi" => "__builtin_HEXAGON_C4_cmpneqi",
+    "llvm.hexagon.C4.fastcorner9" => "__builtin_HEXAGON_C4_fastcorner9",
+    "llvm.hexagon.C4.fastcorner9.not" => "__builtin_HEXAGON_C4_fastcorner9_not",
+    "llvm.hexagon.C4.nbitsclr" => "__builtin_HEXAGON_C4_nbitsclr",
+    "llvm.hexagon.C4.nbitsclri" => "__builtin_HEXAGON_C4_nbitsclri",
+    "llvm.hexagon.C4.nbitsset" => "__builtin_HEXAGON_C4_nbitsset",
+    "llvm.hexagon.C4.or.and" => "__builtin_HEXAGON_C4_or_and",
+    "llvm.hexagon.C4.or.andn" => "__builtin_HEXAGON_C4_or_andn",
+    "llvm.hexagon.C4.or.or" => "__builtin_HEXAGON_C4_or_or",
+    "llvm.hexagon.C4.or.orn" => "__builtin_HEXAGON_C4_or_orn",
+    "llvm.hexagon.F2.conv.d2df" => "__builtin_HEXAGON_F2_conv_d2df",
+    "llvm.hexagon.F2.conv.d2sf" => "__builtin_HEXAGON_F2_conv_d2sf",
+    "llvm.hexagon.F2.conv.df2d" => "__builtin_HEXAGON_F2_conv_df2d",
+    "llvm.hexagon.F2.conv.df2d.chop" => "__builtin_HEXAGON_F2_conv_df2d_chop",
+    "llvm.hexagon.F2.conv.df2sf" => "__builtin_HEXAGON_F2_conv_df2sf",
+    "llvm.hexagon.F2.conv.df2ud" => "__builtin_HEXAGON_F2_conv_df2ud",
+    "llvm.hexagon.F2.conv.df2ud.chop" => "__builtin_HEXAGON_F2_conv_df2ud_chop",
+    "llvm.hexagon.F2.conv.df2uw" => "__builtin_HEXAGON_F2_conv_df2uw",
+    "llvm.hexagon.F2.conv.df2uw.chop" => "__builtin_HEXAGON_F2_conv_df2uw_chop",
+    "llvm.hexagon.F2.conv.df2w" => "__builtin_HEXAGON_F2_conv_df2w",
+    "llvm.hexagon.F2.conv.df2w.chop" => "__builtin_HEXAGON_F2_conv_df2w_chop",
+    "llvm.hexagon.F2.conv.sf2d" => "__builtin_HEXAGON_F2_conv_sf2d",
+    "llvm.hexagon.F2.conv.sf2d.chop" => "__builtin_HEXAGON_F2_conv_sf2d_chop",
+    "llvm.hexagon.F2.conv.sf2df" => "__builtin_HEXAGON_F2_conv_sf2df",
+    "llvm.hexagon.F2.conv.sf2ud" => "__builtin_HEXAGON_F2_conv_sf2ud",
+    "llvm.hexagon.F2.conv.sf2ud.chop" => "__builtin_HEXAGON_F2_conv_sf2ud_chop",
+    "llvm.hexagon.F2.conv.sf2uw" => "__builtin_HEXAGON_F2_conv_sf2uw",
+    "llvm.hexagon.F2.conv.sf2uw.chop" => "__builtin_HEXAGON_F2_conv_sf2uw_chop",
+    "llvm.hexagon.F2.conv.sf2w" => "__builtin_HEXAGON_F2_conv_sf2w",
+    "llvm.hexagon.F2.conv.sf2w.chop" => "__builtin_HEXAGON_F2_conv_sf2w_chop",
+    "llvm.hexagon.F2.conv.ud2df" => "__builtin_HEXAGON_F2_conv_ud2df",
+    "llvm.hexagon.F2.conv.ud2sf" => "__builtin_HEXAGON_F2_conv_ud2sf",
+    "llvm.hexagon.F2.conv.uw2df" => "__builtin_HEXAGON_F2_conv_uw2df",
+    "llvm.hexagon.F2.conv.uw2sf" => "__builtin_HEXAGON_F2_conv_uw2sf",
+    "llvm.hexagon.F2.conv.w2df" => "__builtin_HEXAGON_F2_conv_w2df",
+    "llvm.hexagon.F2.conv.w2sf" => "__builtin_HEXAGON_F2_conv_w2sf",
+    "llvm.hexagon.F2.dfadd" => "__builtin_HEXAGON_F2_dfadd",
+    "llvm.hexagon.F2.dfclass" => "__builtin_HEXAGON_F2_dfclass",
+    "llvm.hexagon.F2.dfcmpeq" => "__builtin_HEXAGON_F2_dfcmpeq",
+    "llvm.hexagon.F2.dfcmpge" => "__builtin_HEXAGON_F2_dfcmpge",
+    "llvm.hexagon.F2.dfcmpgt" => "__builtin_HEXAGON_F2_dfcmpgt",
+    "llvm.hexagon.F2.dfcmpuo" => "__builtin_HEXAGON_F2_dfcmpuo",
+    "llvm.hexagon.F2.dffixupd" => "__builtin_HEXAGON_F2_dffixupd",
+    "llvm.hexagon.F2.dffixupn" => "__builtin_HEXAGON_F2_dffixupn",
+    "llvm.hexagon.F2.dffixupr" => "__builtin_HEXAGON_F2_dffixupr",
+    "llvm.hexagon.F2.dffma" => "__builtin_HEXAGON_F2_dffma",
+    "llvm.hexagon.F2.dffma.lib" => "__builtin_HEXAGON_F2_dffma_lib",
+    "llvm.hexagon.F2.dffma.sc" => "__builtin_HEXAGON_F2_dffma_sc",
+    "llvm.hexagon.F2.dffms" => "__builtin_HEXAGON_F2_dffms",
+    "llvm.hexagon.F2.dffms.lib" => "__builtin_HEXAGON_F2_dffms_lib",
+    "llvm.hexagon.F2.dfimm.n" => "__builtin_HEXAGON_F2_dfimm_n",
+    "llvm.hexagon.F2.dfimm.p" => "__builtin_HEXAGON_F2_dfimm_p",
+    "llvm.hexagon.F2.dfmax" => "__builtin_HEXAGON_F2_dfmax",
+    "llvm.hexagon.F2.dfmin" => "__builtin_HEXAGON_F2_dfmin",
+    "llvm.hexagon.F2.dfmpy" => "__builtin_HEXAGON_F2_dfmpy",
+    "llvm.hexagon.F2.dfmpyfix" => "__builtin_HEXAGON_F2_dfmpyfix",
+    "llvm.hexagon.F2.dfmpyhh" => "__builtin_HEXAGON_F2_dfmpyhh",
+    "llvm.hexagon.F2.dfmpylh" => "__builtin_HEXAGON_F2_dfmpylh",
+    "llvm.hexagon.F2.dfmpyll" => "__builtin_HEXAGON_F2_dfmpyll",
+    "llvm.hexagon.F2.dfsub" => "__builtin_HEXAGON_F2_dfsub",
+    "llvm.hexagon.F2.sfadd" => "__builtin_HEXAGON_F2_sfadd",
+    "llvm.hexagon.F2.sfclass" => "__builtin_HEXAGON_F2_sfclass",
+    "llvm.hexagon.F2.sfcmpeq" => "__builtin_HEXAGON_F2_sfcmpeq",
+    "llvm.hexagon.F2.sfcmpge" => "__builtin_HEXAGON_F2_sfcmpge",
+    "llvm.hexagon.F2.sfcmpgt" => "__builtin_HEXAGON_F2_sfcmpgt",
+    "llvm.hexagon.F2.sfcmpuo" => "__builtin_HEXAGON_F2_sfcmpuo",
+    "llvm.hexagon.F2.sffixupd" => "__builtin_HEXAGON_F2_sffixupd",
+    "llvm.hexagon.F2.sffixupn" => "__builtin_HEXAGON_F2_sffixupn",
+    "llvm.hexagon.F2.sffixupr" => "__builtin_HEXAGON_F2_sffixupr",
+    "llvm.hexagon.F2.sffma" => "__builtin_HEXAGON_F2_sffma",
+    "llvm.hexagon.F2.sffma.lib" => "__builtin_HEXAGON_F2_sffma_lib",
+    "llvm.hexagon.F2.sffma.sc" => "__builtin_HEXAGON_F2_sffma_sc",
+    "llvm.hexagon.F2.sffms" => "__builtin_HEXAGON_F2_sffms",
+    "llvm.hexagon.F2.sffms.lib" => "__builtin_HEXAGON_F2_sffms_lib",
+    "llvm.hexagon.F2.sfimm.n" => "__builtin_HEXAGON_F2_sfimm_n",
+    "llvm.hexagon.F2.sfimm.p" => "__builtin_HEXAGON_F2_sfimm_p",
+    "llvm.hexagon.F2.sfmax" => "__builtin_HEXAGON_F2_sfmax",
+    "llvm.hexagon.F2.sfmin" => "__builtin_HEXAGON_F2_sfmin",
+    "llvm.hexagon.F2.sfmpy" => "__builtin_HEXAGON_F2_sfmpy",
+    "llvm.hexagon.F2.sfsub" => "__builtin_HEXAGON_F2_sfsub",
+    "llvm.hexagon.L2.loadw.locked" => "__builtin_HEXAGON_L2_loadw_locked",
+    "llvm.hexagon.L4.loadd.locked" => "__builtin__HEXAGON_L4_loadd_locked",
+    "llvm.hexagon.M2.acci" => "__builtin_HEXAGON_M2_acci",
+    "llvm.hexagon.M2.accii" => "__builtin_HEXAGON_M2_accii",
+    "llvm.hexagon.M2.cmaci.s0" => "__builtin_HEXAGON_M2_cmaci_s0",
+    "llvm.hexagon.M2.cmacr.s0" => "__builtin_HEXAGON_M2_cmacr_s0",
+    "llvm.hexagon.M2.cmacs.s0" => "__builtin_HEXAGON_M2_cmacs_s0",
+    "llvm.hexagon.M2.cmacs.s1" => "__builtin_HEXAGON_M2_cmacs_s1",
+    "llvm.hexagon.M2.cmacsc.s0" => "__builtin_HEXAGON_M2_cmacsc_s0",
+    "llvm.hexagon.M2.cmacsc.s1" => "__builtin_HEXAGON_M2_cmacsc_s1",
+    "llvm.hexagon.M2.cmpyi.s0" => "__builtin_HEXAGON_M2_cmpyi_s0",
+    "llvm.hexagon.M2.cmpyr.s0" => "__builtin_HEXAGON_M2_cmpyr_s0",
+    "llvm.hexagon.M2.cmpyrs.s0" => "__builtin_HEXAGON_M2_cmpyrs_s0",
+    "llvm.hexagon.M2.cmpyrs.s1" => "__builtin_HEXAGON_M2_cmpyrs_s1",
+    "llvm.hexagon.M2.cmpyrsc.s0" => "__builtin_HEXAGON_M2_cmpyrsc_s0",
+    "llvm.hexagon.M2.cmpyrsc.s1" => "__builtin_HEXAGON_M2_cmpyrsc_s1",
+    "llvm.hexagon.M2.cmpys.s0" => "__builtin_HEXAGON_M2_cmpys_s0",
+    "llvm.hexagon.M2.cmpys.s1" => "__builtin_HEXAGON_M2_cmpys_s1",
+    "llvm.hexagon.M2.cmpysc.s0" => "__builtin_HEXAGON_M2_cmpysc_s0",
+    "llvm.hexagon.M2.cmpysc.s1" => "__builtin_HEXAGON_M2_cmpysc_s1",
+    "llvm.hexagon.M2.cnacs.s0" => "__builtin_HEXAGON_M2_cnacs_s0",
+    "llvm.hexagon.M2.cnacs.s1" => "__builtin_HEXAGON_M2_cnacs_s1",
+    "llvm.hexagon.M2.cnacsc.s0" => "__builtin_HEXAGON_M2_cnacsc_s0",
+    "llvm.hexagon.M2.cnacsc.s1" => "__builtin_HEXAGON_M2_cnacsc_s1",
+    "llvm.hexagon.M2.dpmpyss.acc.s0" => "__builtin_HEXAGON_M2_dpmpyss_acc_s0",
+    "llvm.hexagon.M2.dpmpyss.nac.s0" => "__builtin_HEXAGON_M2_dpmpyss_nac_s0",
+    "llvm.hexagon.M2.dpmpyss.rnd.s0" => "__builtin_HEXAGON_M2_dpmpyss_rnd_s0",
+    "llvm.hexagon.M2.dpmpyss.s0" => "__builtin_HEXAGON_M2_dpmpyss_s0",
+    "llvm.hexagon.M2.dpmpyuu.acc.s0" => "__builtin_HEXAGON_M2_dpmpyuu_acc_s0",
+    "llvm.hexagon.M2.dpmpyuu.nac.s0" => "__builtin_HEXAGON_M2_dpmpyuu_nac_s0",
+    "llvm.hexagon.M2.dpmpyuu.s0" => "__builtin_HEXAGON_M2_dpmpyuu_s0",
+    "llvm.hexagon.M2.hmmpyh.rs1" => "__builtin_HEXAGON_M2_hmmpyh_rs1",
+    "llvm.hexagon.M2.hmmpyh.s1" => "__builtin_HEXAGON_M2_hmmpyh_s1",
+    "llvm.hexagon.M2.hmmpyl.rs1" => "__builtin_HEXAGON_M2_hmmpyl_rs1",
+    "llvm.hexagon.M2.hmmpyl.s1" => "__builtin_HEXAGON_M2_hmmpyl_s1",
+    "llvm.hexagon.M2.maci" => "__builtin_HEXAGON_M2_maci",
+    "llvm.hexagon.M2.macsin" => "__builtin_HEXAGON_M2_macsin",
+    "llvm.hexagon.M2.macsip" => "__builtin_HEXAGON_M2_macsip",
+    "llvm.hexagon.M2.mmachs.rs0" => "__builtin_HEXAGON_M2_mmachs_rs0",
+    "llvm.hexagon.M2.mmachs.rs1" => "__builtin_HEXAGON_M2_mmachs_rs1",
+    "llvm.hexagon.M2.mmachs.s0" => "__builtin_HEXAGON_M2_mmachs_s0",
+    "llvm.hexagon.M2.mmachs.s1" => "__builtin_HEXAGON_M2_mmachs_s1",
+    "llvm.hexagon.M2.mmacls.rs0" => "__builtin_HEXAGON_M2_mmacls_rs0",
+    "llvm.hexagon.M2.mmacls.rs1" => "__builtin_HEXAGON_M2_mmacls_rs1",
+    "llvm.hexagon.M2.mmacls.s0" => "__builtin_HEXAGON_M2_mmacls_s0",
+    "llvm.hexagon.M2.mmacls.s1" => "__builtin_HEXAGON_M2_mmacls_s1",
+    "llvm.hexagon.M2.mmacuhs.rs0" => "__builtin_HEXAGON_M2_mmacuhs_rs0",
+    "llvm.hexagon.M2.mmacuhs.rs1" => "__builtin_HEXAGON_M2_mmacuhs_rs1",
+    "llvm.hexagon.M2.mmacuhs.s0" => "__builtin_HEXAGON_M2_mmacuhs_s0",
+    "llvm.hexagon.M2.mmacuhs.s1" => "__builtin_HEXAGON_M2_mmacuhs_s1",
+    "llvm.hexagon.M2.mmaculs.rs0" => "__builtin_HEXAGON_M2_mmaculs_rs0",
+    "llvm.hexagon.M2.mmaculs.rs1" => "__builtin_HEXAGON_M2_mmaculs_rs1",
+    "llvm.hexagon.M2.mmaculs.s0" => "__builtin_HEXAGON_M2_mmaculs_s0",
+    "llvm.hexagon.M2.mmaculs.s1" => "__builtin_HEXAGON_M2_mmaculs_s1",
+    "llvm.hexagon.M2.mmpyh.rs0" => "__builtin_HEXAGON_M2_mmpyh_rs0",
+    "llvm.hexagon.M2.mmpyh.rs1" => "__builtin_HEXAGON_M2_mmpyh_rs1",
+    "llvm.hexagon.M2.mmpyh.s0" => "__builtin_HEXAGON_M2_mmpyh_s0",
+    "llvm.hexagon.M2.mmpyh.s1" => "__builtin_HEXAGON_M2_mmpyh_s1",
+    "llvm.hexagon.M2.mmpyl.rs0" => "__builtin_HEXAGON_M2_mmpyl_rs0",
+    "llvm.hexagon.M2.mmpyl.rs1" => "__builtin_HEXAGON_M2_mmpyl_rs1",
+    "llvm.hexagon.M2.mmpyl.s0" => "__builtin_HEXAGON_M2_mmpyl_s0",
+    "llvm.hexagon.M2.mmpyl.s1" => "__builtin_HEXAGON_M2_mmpyl_s1",
+    "llvm.hexagon.M2.mmpyuh.rs0" => "__builtin_HEXAGON_M2_mmpyuh_rs0",
+    "llvm.hexagon.M2.mmpyuh.rs1" => "__builtin_HEXAGON_M2_mmpyuh_rs1",
+    "llvm.hexagon.M2.mmpyuh.s0" => "__builtin_HEXAGON_M2_mmpyuh_s0",
+    "llvm.hexagon.M2.mmpyuh.s1" => "__builtin_HEXAGON_M2_mmpyuh_s1",
+    "llvm.hexagon.M2.mmpyul.rs0" => "__builtin_HEXAGON_M2_mmpyul_rs0",
+    "llvm.hexagon.M2.mmpyul.rs1" => "__builtin_HEXAGON_M2_mmpyul_rs1",
+    "llvm.hexagon.M2.mmpyul.s0" => "__builtin_HEXAGON_M2_mmpyul_s0",
+    "llvm.hexagon.M2.mmpyul.s1" => "__builtin_HEXAGON_M2_mmpyul_s1",
+    "llvm.hexagon.M2.mnaci" => "__builtin_HEXAGON_M2_mnaci",
+    "llvm.hexagon.M2.mpy.acc.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_hh_s0",
+    "llvm.hexagon.M2.mpy.acc.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_hh_s1",
+    "llvm.hexagon.M2.mpy.acc.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_hl_s0",
+    "llvm.hexagon.M2.mpy.acc.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_hl_s1",
+    "llvm.hexagon.M2.mpy.acc.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_lh_s0",
+    "llvm.hexagon.M2.mpy.acc.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_lh_s1",
+    "llvm.hexagon.M2.mpy.acc.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_ll_s0",
+    "llvm.hexagon.M2.mpy.acc.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_ll_s1",
+    "llvm.hexagon.M2.mpy.acc.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0",
+    "llvm.hexagon.M2.mpy.acc.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1",
+    "llvm.hexagon.M2.mpy.acc.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0",
+    "llvm.hexagon.M2.mpy.acc.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1",
+    "llvm.hexagon.M2.mpy.acc.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0",
+    "llvm.hexagon.M2.mpy.acc.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1",
+    "llvm.hexagon.M2.mpy.acc.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0",
+    "llvm.hexagon.M2.mpy.acc.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1",
+    "llvm.hexagon.M2.mpy.hh.s0" => "__builtin_HEXAGON_M2_mpy_hh_s0",
+    "llvm.hexagon.M2.mpy.hh.s1" => "__builtin_HEXAGON_M2_mpy_hh_s1",
+    "llvm.hexagon.M2.mpy.hl.s0" => "__builtin_HEXAGON_M2_mpy_hl_s0",
+    "llvm.hexagon.M2.mpy.hl.s1" => "__builtin_HEXAGON_M2_mpy_hl_s1",
+    "llvm.hexagon.M2.mpy.lh.s0" => "__builtin_HEXAGON_M2_mpy_lh_s0",
+    "llvm.hexagon.M2.mpy.lh.s1" => "__builtin_HEXAGON_M2_mpy_lh_s1",
+    "llvm.hexagon.M2.mpy.ll.s0" => "__builtin_HEXAGON_M2_mpy_ll_s0",
+    "llvm.hexagon.M2.mpy.ll.s1" => "__builtin_HEXAGON_M2_mpy_ll_s1",
+    "llvm.hexagon.M2.mpy.nac.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_hh_s0",
+    "llvm.hexagon.M2.mpy.nac.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_hh_s1",
+    "llvm.hexagon.M2.mpy.nac.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_hl_s0",
+    "llvm.hexagon.M2.mpy.nac.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_hl_s1",
+    "llvm.hexagon.M2.mpy.nac.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_lh_s0",
+    "llvm.hexagon.M2.mpy.nac.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_lh_s1",
+    "llvm.hexagon.M2.mpy.nac.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_ll_s0",
+    "llvm.hexagon.M2.mpy.nac.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_ll_s1",
+    "llvm.hexagon.M2.mpy.nac.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0",
+    "llvm.hexagon.M2.mpy.nac.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1",
+    "llvm.hexagon.M2.mpy.nac.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0",
+    "llvm.hexagon.M2.mpy.nac.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1",
+    "llvm.hexagon.M2.mpy.nac.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0",
+    "llvm.hexagon.M2.mpy.nac.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1",
+    "llvm.hexagon.M2.mpy.nac.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0",
+    "llvm.hexagon.M2.mpy.nac.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1",
+    "llvm.hexagon.M2.mpy.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s0",
+    "llvm.hexagon.M2.mpy.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s1",
+    "llvm.hexagon.M2.mpy.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s0",
+    "llvm.hexagon.M2.mpy.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s1",
+    "llvm.hexagon.M2.mpy.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s0",
+    "llvm.hexagon.M2.mpy.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s1",
+    "llvm.hexagon.M2.mpy.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s0",
+    "llvm.hexagon.M2.mpy.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s1",
+    "llvm.hexagon.M2.mpy.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_hh_s0",
+    "llvm.hexagon.M2.mpy.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_hh_s1",
+    "llvm.hexagon.M2.mpy.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_hl_s0",
+    "llvm.hexagon.M2.mpy.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_hl_s1",
+    "llvm.hexagon.M2.mpy.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_lh_s0",
+    "llvm.hexagon.M2.mpy.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_lh_s1",
+    "llvm.hexagon.M2.mpy.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_ll_s0",
+    "llvm.hexagon.M2.mpy.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_ll_s1",
+    "llvm.hexagon.M2.mpy.sat.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0",
+    "llvm.hexagon.M2.mpy.sat.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1",
+    "llvm.hexagon.M2.mpy.sat.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0",
+    "llvm.hexagon.M2.mpy.sat.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1",
+    "llvm.hexagon.M2.mpy.sat.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0",
+    "llvm.hexagon.M2.mpy.sat.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1",
+    "llvm.hexagon.M2.mpy.sat.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0",
+    "llvm.hexagon.M2.mpy.sat.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1",
+    "llvm.hexagon.M2.mpy.up" => "__builtin_HEXAGON_M2_mpy_up",
+    "llvm.hexagon.M2.mpy.up.s1" => "__builtin_HEXAGON_M2_mpy_up_s1",
+    "llvm.hexagon.M2.mpy.up.s1.sat" => "__builtin_HEXAGON_M2_mpy_up_s1_sat",
+    "llvm.hexagon.M2.mpyd.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s0",
+    "llvm.hexagon.M2.mpyd.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s1",
+    "llvm.hexagon.M2.mpyd.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s0",
+    "llvm.hexagon.M2.mpyd.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s1",
+    "llvm.hexagon.M2.mpyd.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s0",
+    "llvm.hexagon.M2.mpyd.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s1",
+    "llvm.hexagon.M2.mpyd.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s0",
+    "llvm.hexagon.M2.mpyd.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s1",
+    "llvm.hexagon.M2.mpyd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_hh_s0",
+    "llvm.hexagon.M2.mpyd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_hh_s1",
+    "llvm.hexagon.M2.mpyd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_hl_s0",
+    "llvm.hexagon.M2.mpyd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_hl_s1",
+    "llvm.hexagon.M2.mpyd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_lh_s0",
+    "llvm.hexagon.M2.mpyd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_lh_s1",
+    "llvm.hexagon.M2.mpyd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_ll_s0",
+    "llvm.hexagon.M2.mpyd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_ll_s1",
+    "llvm.hexagon.M2.mpyd.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s0",
+    "llvm.hexagon.M2.mpyd.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s1",
+    "llvm.hexagon.M2.mpyd.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s0",
+    "llvm.hexagon.M2.mpyd.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s1",
+    "llvm.hexagon.M2.mpyd.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s0",
+    "llvm.hexagon.M2.mpyd.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s1",
+    "llvm.hexagon.M2.mpyd.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s0",
+    "llvm.hexagon.M2.mpyd.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s1",
+    "llvm.hexagon.M2.mpyd.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s0",
+    "llvm.hexagon.M2.mpyd.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s1",
+    "llvm.hexagon.M2.mpyd.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s0",
+    "llvm.hexagon.M2.mpyd.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s1",
+    "llvm.hexagon.M2.mpyd.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s0",
+    "llvm.hexagon.M2.mpyd.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s1",
+    "llvm.hexagon.M2.mpyd.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s0",
+    "llvm.hexagon.M2.mpyd.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s1",
+    "llvm.hexagon.M2.mpyi" => "__builtin_HEXAGON_M2_mpyi",
+    "llvm.hexagon.M2.mpysmi" => "__builtin_HEXAGON_M2_mpysmi",
+    "llvm.hexagon.M2.mpysu.up" => "__builtin_HEXAGON_M2_mpysu_up",
+    "llvm.hexagon.M2.mpyu.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s0",
+    "llvm.hexagon.M2.mpyu.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s1",
+    "llvm.hexagon.M2.mpyu.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s0",
+    "llvm.hexagon.M2.mpyu.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s1",
+    "llvm.hexagon.M2.mpyu.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s0",
+    "llvm.hexagon.M2.mpyu.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s1",
+    "llvm.hexagon.M2.mpyu.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s0",
+    "llvm.hexagon.M2.mpyu.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s1",
+    "llvm.hexagon.M2.mpyu.hh.s0" => "__builtin_HEXAGON_M2_mpyu_hh_s0",
+    "llvm.hexagon.M2.mpyu.hh.s1" => "__builtin_HEXAGON_M2_mpyu_hh_s1",
+    "llvm.hexagon.M2.mpyu.hl.s0" => "__builtin_HEXAGON_M2_mpyu_hl_s0",
+    "llvm.hexagon.M2.mpyu.hl.s1" => "__builtin_HEXAGON_M2_mpyu_hl_s1",
+    "llvm.hexagon.M2.mpyu.lh.s0" => "__builtin_HEXAGON_M2_mpyu_lh_s0",
+    "llvm.hexagon.M2.mpyu.lh.s1" => "__builtin_HEXAGON_M2_mpyu_lh_s1",
+    "llvm.hexagon.M2.mpyu.ll.s0" => "__builtin_HEXAGON_M2_mpyu_ll_s0",
+    "llvm.hexagon.M2.mpyu.ll.s1" => "__builtin_HEXAGON_M2_mpyu_ll_s1",
+    "llvm.hexagon.M2.mpyu.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s0",
+    "llvm.hexagon.M2.mpyu.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s1",
+    "llvm.hexagon.M2.mpyu.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s0",
+    "llvm.hexagon.M2.mpyu.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s1",
+    "llvm.hexagon.M2.mpyu.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s0",
+    "llvm.hexagon.M2.mpyu.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s1",
+    "llvm.hexagon.M2.mpyu.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s0",
+    "llvm.hexagon.M2.mpyu.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s1",
+    "llvm.hexagon.M2.mpyu.up" => "__builtin_HEXAGON_M2_mpyu_up",
+    "llvm.hexagon.M2.mpyud.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s0",
+    "llvm.hexagon.M2.mpyud.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s1",
+    "llvm.hexagon.M2.mpyud.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s0",
+    "llvm.hexagon.M2.mpyud.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s1",
+    "llvm.hexagon.M2.mpyud.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s0",
+    "llvm.hexagon.M2.mpyud.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s1",
+    "llvm.hexagon.M2.mpyud.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s0",
+    "llvm.hexagon.M2.mpyud.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s1",
+    "llvm.hexagon.M2.mpyud.hh.s0" => "__builtin_HEXAGON_M2_mpyud_hh_s0",
+    "llvm.hexagon.M2.mpyud.hh.s1" => "__builtin_HEXAGON_M2_mpyud_hh_s1",
+    "llvm.hexagon.M2.mpyud.hl.s0" => "__builtin_HEXAGON_M2_mpyud_hl_s0",
+    "llvm.hexagon.M2.mpyud.hl.s1" => "__builtin_HEXAGON_M2_mpyud_hl_s1",
+    "llvm.hexagon.M2.mpyud.lh.s0" => "__builtin_HEXAGON_M2_mpyud_lh_s0",
+    "llvm.hexagon.M2.mpyud.lh.s1" => "__builtin_HEXAGON_M2_mpyud_lh_s1",
+    "llvm.hexagon.M2.mpyud.ll.s0" => "__builtin_HEXAGON_M2_mpyud_ll_s0",
+    "llvm.hexagon.M2.mpyud.ll.s1" => "__builtin_HEXAGON_M2_mpyud_ll_s1",
+    "llvm.hexagon.M2.mpyud.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s0",
+    "llvm.hexagon.M2.mpyud.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s1",
+    "llvm.hexagon.M2.mpyud.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s0",
+    "llvm.hexagon.M2.mpyud.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s1",
+    "llvm.hexagon.M2.mpyud.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s0",
+    "llvm.hexagon.M2.mpyud.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s1",
+    "llvm.hexagon.M2.mpyud.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s0",
+    "llvm.hexagon.M2.mpyud.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s1",
+    "llvm.hexagon.M2.mpyui" => "__builtin_HEXAGON_M2_mpyui",
+    "llvm.hexagon.M2.nacci" => "__builtin_HEXAGON_M2_nacci",
+    "llvm.hexagon.M2.naccii" => "__builtin_HEXAGON_M2_naccii",
+    "llvm.hexagon.M2.subacc" => "__builtin_HEXAGON_M2_subacc",
+    "llvm.hexagon.M2.vabsdiffh" => "__builtin_HEXAGON_M2_vabsdiffh",
+    "llvm.hexagon.M2.vabsdiffw" => "__builtin_HEXAGON_M2_vabsdiffw",
+    "llvm.hexagon.M2.vcmac.s0.sat.i" => "__builtin_HEXAGON_M2_vcmac_s0_sat_i",
+    "llvm.hexagon.M2.vcmac.s0.sat.r" => "__builtin_HEXAGON_M2_vcmac_s0_sat_r",
+    "llvm.hexagon.M2.vcmpy.s0.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_i",
+    "llvm.hexagon.M2.vcmpy.s0.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_r",
+    "llvm.hexagon.M2.vcmpy.s1.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_i",
+    "llvm.hexagon.M2.vcmpy.s1.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_r",
+    "llvm.hexagon.M2.vdmacs.s0" => "__builtin_HEXAGON_M2_vdmacs_s0",
+    "llvm.hexagon.M2.vdmacs.s1" => "__builtin_HEXAGON_M2_vdmacs_s1",
+    "llvm.hexagon.M2.vdmpyrs.s0" => "__builtin_HEXAGON_M2_vdmpyrs_s0",
+    "llvm.hexagon.M2.vdmpyrs.s1" => "__builtin_HEXAGON_M2_vdmpyrs_s1",
+    "llvm.hexagon.M2.vdmpys.s0" => "__builtin_HEXAGON_M2_vdmpys_s0",
+    "llvm.hexagon.M2.vdmpys.s1" => "__builtin_HEXAGON_M2_vdmpys_s1",
+    "llvm.hexagon.M2.vmac2" => "__builtin_HEXAGON_M2_vmac2",
+    "llvm.hexagon.M2.vmac2es" => "__builtin_HEXAGON_M2_vmac2es",
+    "llvm.hexagon.M2.vmac2es.s0" => "__builtin_HEXAGON_M2_vmac2es_s0",
+    "llvm.hexagon.M2.vmac2es.s1" => "__builtin_HEXAGON_M2_vmac2es_s1",
+    "llvm.hexagon.M2.vmac2s.s0" => "__builtin_HEXAGON_M2_vmac2s_s0",
+    "llvm.hexagon.M2.vmac2s.s1" => "__builtin_HEXAGON_M2_vmac2s_s1",
+    "llvm.hexagon.M2.vmac2su.s0" => "__builtin_HEXAGON_M2_vmac2su_s0",
+    "llvm.hexagon.M2.vmac2su.s1" => "__builtin_HEXAGON_M2_vmac2su_s1",
+    "llvm.hexagon.M2.vmpy2es.s0" => "__builtin_HEXAGON_M2_vmpy2es_s0",
+    "llvm.hexagon.M2.vmpy2es.s1" => "__builtin_HEXAGON_M2_vmpy2es_s1",
+    "llvm.hexagon.M2.vmpy2s.s0" => "__builtin_HEXAGON_M2_vmpy2s_s0",
+    "llvm.hexagon.M2.vmpy2s.s0pack" => "__builtin_HEXAGON_M2_vmpy2s_s0pack",
+    "llvm.hexagon.M2.vmpy2s.s1" => "__builtin_HEXAGON_M2_vmpy2s_s1",
+    "llvm.hexagon.M2.vmpy2s.s1pack" => "__builtin_HEXAGON_M2_vmpy2s_s1pack",
+    "llvm.hexagon.M2.vmpy2su.s0" => "__builtin_HEXAGON_M2_vmpy2su_s0",
+    "llvm.hexagon.M2.vmpy2su.s1" => "__builtin_HEXAGON_M2_vmpy2su_s1",
+    "llvm.hexagon.M2.vraddh" => "__builtin_HEXAGON_M2_vraddh",
+    "llvm.hexagon.M2.vradduh" => "__builtin_HEXAGON_M2_vradduh",
+    "llvm.hexagon.M2.vrcmaci.s0" => "__builtin_HEXAGON_M2_vrcmaci_s0",
+    "llvm.hexagon.M2.vrcmaci.s0c" => "__builtin_HEXAGON_M2_vrcmaci_s0c",
+    "llvm.hexagon.M2.vrcmacr.s0" => "__builtin_HEXAGON_M2_vrcmacr_s0",
+    "llvm.hexagon.M2.vrcmacr.s0c" => "__builtin_HEXAGON_M2_vrcmacr_s0c",
+    "llvm.hexagon.M2.vrcmpyi.s0" => "__builtin_HEXAGON_M2_vrcmpyi_s0",
+    "llvm.hexagon.M2.vrcmpyi.s0c" => "__builtin_HEXAGON_M2_vrcmpyi_s0c",
+    "llvm.hexagon.M2.vrcmpyr.s0" => "__builtin_HEXAGON_M2_vrcmpyr_s0",
+    "llvm.hexagon.M2.vrcmpyr.s0c" => "__builtin_HEXAGON_M2_vrcmpyr_s0c",
+    "llvm.hexagon.M2.vrcmpys.acc.s1" => "__builtin_HEXAGON_M2_vrcmpys_acc_s1",
+    "llvm.hexagon.M2.vrcmpys.s1" => "__builtin_HEXAGON_M2_vrcmpys_s1",
+    "llvm.hexagon.M2.vrcmpys.s1rp" => "__builtin_HEXAGON_M2_vrcmpys_s1rp",
+    "llvm.hexagon.M2.vrmac.s0" => "__builtin_HEXAGON_M2_vrmac_s0",
+    "llvm.hexagon.M2.vrmpy.s0" => "__builtin_HEXAGON_M2_vrmpy_s0",
+    "llvm.hexagon.M2.xor.xacc" => "__builtin_HEXAGON_M2_xor_xacc",
+    "llvm.hexagon.M4.and.and" => "__builtin_HEXAGON_M4_and_and",
+    "llvm.hexagon.M4.and.andn" => "__builtin_HEXAGON_M4_and_andn",
+    "llvm.hexagon.M4.and.or" => "__builtin_HEXAGON_M4_and_or",
+    "llvm.hexagon.M4.and.xor" => "__builtin_HEXAGON_M4_and_xor",
+    "llvm.hexagon.M4.cmpyi.wh" => "__builtin_HEXAGON_M4_cmpyi_wh",
+    "llvm.hexagon.M4.cmpyi.whc" => "__builtin_HEXAGON_M4_cmpyi_whc",
+    "llvm.hexagon.M4.cmpyr.wh" => "__builtin_HEXAGON_M4_cmpyr_wh",
+    "llvm.hexagon.M4.cmpyr.whc" => "__builtin_HEXAGON_M4_cmpyr_whc",
+    "llvm.hexagon.M4.mac.up.s1.sat" => "__builtin_HEXAGON_M4_mac_up_s1_sat",
+    "llvm.hexagon.M4.mpyri.addi" => "__builtin_HEXAGON_M4_mpyri_addi",
+    "llvm.hexagon.M4.mpyri.addr" => "__builtin_HEXAGON_M4_mpyri_addr",
+    "llvm.hexagon.M4.mpyri.addr.u2" => "__builtin_HEXAGON_M4_mpyri_addr_u2",
+    "llvm.hexagon.M4.mpyrr.addi" => "__builtin_HEXAGON_M4_mpyrr_addi",
+    "llvm.hexagon.M4.mpyrr.addr" => "__builtin_HEXAGON_M4_mpyrr_addr",
+    "llvm.hexagon.M4.nac.up.s1.sat" => "__builtin_HEXAGON_M4_nac_up_s1_sat",
+    "llvm.hexagon.M4.or.and" => "__builtin_HEXAGON_M4_or_and",
+    "llvm.hexagon.M4.or.andn" => "__builtin_HEXAGON_M4_or_andn",
+    "llvm.hexagon.M4.or.or" => "__builtin_HEXAGON_M4_or_or",
+    "llvm.hexagon.M4.or.xor" => "__builtin_HEXAGON_M4_or_xor",
+    "llvm.hexagon.M4.pmpyw" => "__builtin_HEXAGON_M4_pmpyw",
+    "llvm.hexagon.M4.pmpyw.acc" => "__builtin_HEXAGON_M4_pmpyw_acc",
+    "llvm.hexagon.M4.vpmpyh" => "__builtin_HEXAGON_M4_vpmpyh",
+    "llvm.hexagon.M4.vpmpyh.acc" => "__builtin_HEXAGON_M4_vpmpyh_acc",
+    "llvm.hexagon.M4.vrmpyeh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s0",
+    "llvm.hexagon.M4.vrmpyeh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s1",
+    "llvm.hexagon.M4.vrmpyeh.s0" => "__builtin_HEXAGON_M4_vrmpyeh_s0",
+    "llvm.hexagon.M4.vrmpyeh.s1" => "__builtin_HEXAGON_M4_vrmpyeh_s1",
+    "llvm.hexagon.M4.vrmpyoh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s0",
+    "llvm.hexagon.M4.vrmpyoh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s1",
+    "llvm.hexagon.M4.vrmpyoh.s0" => "__builtin_HEXAGON_M4_vrmpyoh_s0",
+    "llvm.hexagon.M4.vrmpyoh.s1" => "__builtin_HEXAGON_M4_vrmpyoh_s1",
+    "llvm.hexagon.M4.xor.and" => "__builtin_HEXAGON_M4_xor_and",
+    "llvm.hexagon.M4.xor.andn" => "__builtin_HEXAGON_M4_xor_andn",
+    "llvm.hexagon.M4.xor.or" => "__builtin_HEXAGON_M4_xor_or",
+    "llvm.hexagon.M4.xor.xacc" => "__builtin_HEXAGON_M4_xor_xacc",
+    "llvm.hexagon.M5.vdmacbsu" => "__builtin_HEXAGON_M5_vdmacbsu",
+    "llvm.hexagon.M5.vdmpybsu" => "__builtin_HEXAGON_M5_vdmpybsu",
+    "llvm.hexagon.M5.vmacbsu" => "__builtin_HEXAGON_M5_vmacbsu",
+    "llvm.hexagon.M5.vmacbuu" => "__builtin_HEXAGON_M5_vmacbuu",
+    "llvm.hexagon.M5.vmpybsu" => "__builtin_HEXAGON_M5_vmpybsu",
+    "llvm.hexagon.M5.vmpybuu" => "__builtin_HEXAGON_M5_vmpybuu",
+    "llvm.hexagon.M5.vrmacbsu" => "__builtin_HEXAGON_M5_vrmacbsu",
+    "llvm.hexagon.M5.vrmacbuu" => "__builtin_HEXAGON_M5_vrmacbuu",
+    "llvm.hexagon.M5.vrmpybsu" => "__builtin_HEXAGON_M5_vrmpybsu",
+    "llvm.hexagon.M5.vrmpybuu" => "__builtin_HEXAGON_M5_vrmpybuu",
+    "llvm.hexagon.M6.vabsdiffb" => "__builtin_HEXAGON_M6_vabsdiffb",
+    "llvm.hexagon.M6.vabsdiffub" => "__builtin_HEXAGON_M6_vabsdiffub",
+    "llvm.hexagon.M7.dcmpyiw" => "__builtin_HEXAGON_M7_dcmpyiw",
+    "llvm.hexagon.M7.dcmpyiw.acc" => "__builtin_HEXAGON_M7_dcmpyiw_acc",
+    "llvm.hexagon.M7.dcmpyiwc" => "__builtin_HEXAGON_M7_dcmpyiwc",
+    "llvm.hexagon.M7.dcmpyiwc.acc" => "__builtin_HEXAGON_M7_dcmpyiwc_acc",
+    "llvm.hexagon.M7.dcmpyrw" => "__builtin_HEXAGON_M7_dcmpyrw",
+    "llvm.hexagon.M7.dcmpyrw.acc" => "__builtin_HEXAGON_M7_dcmpyrw_acc",
+    "llvm.hexagon.M7.dcmpyrwc" => "__builtin_HEXAGON_M7_dcmpyrwc",
+    "llvm.hexagon.M7.dcmpyrwc.acc" => "__builtin_HEXAGON_M7_dcmpyrwc_acc",
+    "llvm.hexagon.M7.vdmpy" => "__builtin_HEXAGON_M7_vdmpy",
+    "llvm.hexagon.M7.vdmpy.acc" => "__builtin_HEXAGON_M7_vdmpy_acc",
+    "llvm.hexagon.M7.wcmpyiw" => "__builtin_HEXAGON_M7_wcmpyiw",
+    "llvm.hexagon.M7.wcmpyiw.rnd" => "__builtin_HEXAGON_M7_wcmpyiw_rnd",
+    "llvm.hexagon.M7.wcmpyiwc" => "__builtin_HEXAGON_M7_wcmpyiwc",
+    "llvm.hexagon.M7.wcmpyiwc.rnd" => "__builtin_HEXAGON_M7_wcmpyiwc_rnd",
+    "llvm.hexagon.M7.wcmpyrw" => "__builtin_HEXAGON_M7_wcmpyrw",
+    "llvm.hexagon.M7.wcmpyrw.rnd" => "__builtin_HEXAGON_M7_wcmpyrw_rnd",
+    "llvm.hexagon.M7.wcmpyrwc" => "__builtin_HEXAGON_M7_wcmpyrwc",
+    "llvm.hexagon.M7.wcmpyrwc.rnd" => "__builtin_HEXAGON_M7_wcmpyrwc_rnd",
+    "llvm.hexagon.S2.addasl.rrri" => "__builtin_HEXAGON_S2_addasl_rrri",
+    "llvm.hexagon.S2.asl.i.p" => "__builtin_HEXAGON_S2_asl_i_p",
+    "llvm.hexagon.S2.asl.i.p.acc" => "__builtin_HEXAGON_S2_asl_i_p_acc",
+    "llvm.hexagon.S2.asl.i.p.and" => "__builtin_HEXAGON_S2_asl_i_p_and",
+    "llvm.hexagon.S2.asl.i.p.nac" => "__builtin_HEXAGON_S2_asl_i_p_nac",
+    "llvm.hexagon.S2.asl.i.p.or" => "__builtin_HEXAGON_S2_asl_i_p_or",
+    "llvm.hexagon.S2.asl.i.p.xacc" => "__builtin_HEXAGON_S2_asl_i_p_xacc",
+    "llvm.hexagon.S2.asl.i.r" => "__builtin_HEXAGON_S2_asl_i_r",
+    "llvm.hexagon.S2.asl.i.r.acc" => "__builtin_HEXAGON_S2_asl_i_r_acc",
+    "llvm.hexagon.S2.asl.i.r.and" => "__builtin_HEXAGON_S2_asl_i_r_and",
+    "llvm.hexagon.S2.asl.i.r.nac" => "__builtin_HEXAGON_S2_asl_i_r_nac",
+    "llvm.hexagon.S2.asl.i.r.or" => "__builtin_HEXAGON_S2_asl_i_r_or",
+    "llvm.hexagon.S2.asl.i.r.sat" => "__builtin_HEXAGON_S2_asl_i_r_sat",
+    "llvm.hexagon.S2.asl.i.r.xacc" => "__builtin_HEXAGON_S2_asl_i_r_xacc",
+    "llvm.hexagon.S2.asl.i.vh" => "__builtin_HEXAGON_S2_asl_i_vh",
+    "llvm.hexagon.S2.asl.i.vw" => "__builtin_HEXAGON_S2_asl_i_vw",
+    "llvm.hexagon.S2.asl.r.p" => "__builtin_HEXAGON_S2_asl_r_p",
+    "llvm.hexagon.S2.asl.r.p.acc" => "__builtin_HEXAGON_S2_asl_r_p_acc",
+    "llvm.hexagon.S2.asl.r.p.and" => "__builtin_HEXAGON_S2_asl_r_p_and",
+    "llvm.hexagon.S2.asl.r.p.nac" => "__builtin_HEXAGON_S2_asl_r_p_nac",
+    "llvm.hexagon.S2.asl.r.p.or" => "__builtin_HEXAGON_S2_asl_r_p_or",
+    "llvm.hexagon.S2.asl.r.p.xor" => "__builtin_HEXAGON_S2_asl_r_p_xor",
+    "llvm.hexagon.S2.asl.r.r" => "__builtin_HEXAGON_S2_asl_r_r",
+    "llvm.hexagon.S2.asl.r.r.acc" => "__builtin_HEXAGON_S2_asl_r_r_acc",
+    "llvm.hexagon.S2.asl.r.r.and" => "__builtin_HEXAGON_S2_asl_r_r_and",
+    "llvm.hexagon.S2.asl.r.r.nac" => "__builtin_HEXAGON_S2_asl_r_r_nac",
+    "llvm.hexagon.S2.asl.r.r.or" => "__builtin_HEXAGON_S2_asl_r_r_or",
+    "llvm.hexagon.S2.asl.r.r.sat" => "__builtin_HEXAGON_S2_asl_r_r_sat",
+    "llvm.hexagon.S2.asl.r.vh" => "__builtin_HEXAGON_S2_asl_r_vh",
+    "llvm.hexagon.S2.asl.r.vw" => "__builtin_HEXAGON_S2_asl_r_vw",
+    "llvm.hexagon.S2.asr.i.p" => "__builtin_HEXAGON_S2_asr_i_p",
+    "llvm.hexagon.S2.asr.i.p.acc" => "__builtin_HEXAGON_S2_asr_i_p_acc",
+    "llvm.hexagon.S2.asr.i.p.and" => "__builtin_HEXAGON_S2_asr_i_p_and",
+    "llvm.hexagon.S2.asr.i.p.nac" => "__builtin_HEXAGON_S2_asr_i_p_nac",
+    "llvm.hexagon.S2.asr.i.p.or" => "__builtin_HEXAGON_S2_asr_i_p_or",
+    "llvm.hexagon.S2.asr.i.p.rnd" => "__builtin_HEXAGON_S2_asr_i_p_rnd",
+    "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax",
+    "llvm.hexagon.S2.asr.i.r" => "__builtin_HEXAGON_S2_asr_i_r",
+    "llvm.hexagon.S2.asr.i.r.acc" => "__builtin_HEXAGON_S2_asr_i_r_acc",
+    "llvm.hexagon.S2.asr.i.r.and" => "__builtin_HEXAGON_S2_asr_i_r_and",
+    "llvm.hexagon.S2.asr.i.r.nac" => "__builtin_HEXAGON_S2_asr_i_r_nac",
+    "llvm.hexagon.S2.asr.i.r.or" => "__builtin_HEXAGON_S2_asr_i_r_or",
+    "llvm.hexagon.S2.asr.i.r.rnd" => "__builtin_HEXAGON_S2_asr_i_r_rnd",
+    "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax",
+    "llvm.hexagon.S2.asr.i.svw.trun" => "__builtin_HEXAGON_S2_asr_i_svw_trun",
+    "llvm.hexagon.S2.asr.i.vh" => "__builtin_HEXAGON_S2_asr_i_vh",
+    "llvm.hexagon.S2.asr.i.vw" => "__builtin_HEXAGON_S2_asr_i_vw",
+    "llvm.hexagon.S2.asr.r.p" => "__builtin_HEXAGON_S2_asr_r_p",
+    "llvm.hexagon.S2.asr.r.p.acc" => "__builtin_HEXAGON_S2_asr_r_p_acc",
+    "llvm.hexagon.S2.asr.r.p.and" => "__builtin_HEXAGON_S2_asr_r_p_and",
+    "llvm.hexagon.S2.asr.r.p.nac" => "__builtin_HEXAGON_S2_asr_r_p_nac",
+    "llvm.hexagon.S2.asr.r.p.or" => "__builtin_HEXAGON_S2_asr_r_p_or",
+    "llvm.hexagon.S2.asr.r.p.xor" => "__builtin_HEXAGON_S2_asr_r_p_xor",
+    "llvm.hexagon.S2.asr.r.r" => "__builtin_HEXAGON_S2_asr_r_r",
+    "llvm.hexagon.S2.asr.r.r.acc" => "__builtin_HEXAGON_S2_asr_r_r_acc",
+    "llvm.hexagon.S2.asr.r.r.and" => "__builtin_HEXAGON_S2_asr_r_r_and",
+    "llvm.hexagon.S2.asr.r.r.nac" => "__builtin_HEXAGON_S2_asr_r_r_nac",
+    "llvm.hexagon.S2.asr.r.r.or" => "__builtin_HEXAGON_S2_asr_r_r_or",
+    "llvm.hexagon.S2.asr.r.r.sat" => "__builtin_HEXAGON_S2_asr_r_r_sat",
+    "llvm.hexagon.S2.asr.r.svw.trun" => "__builtin_HEXAGON_S2_asr_r_svw_trun",
+    "llvm.hexagon.S2.asr.r.vh" => "__builtin_HEXAGON_S2_asr_r_vh",
+    "llvm.hexagon.S2.asr.r.vw" => "__builtin_HEXAGON_S2_asr_r_vw",
+    "llvm.hexagon.S2.brev" => "__builtin_HEXAGON_S2_brev",
+    "llvm.hexagon.S2.brevp" => "__builtin_HEXAGON_S2_brevp",
+    "llvm.hexagon.S2.cabacencbin" => "__builtin_HEXAGON_S2_cabacencbin",
+    "llvm.hexagon.S2.cl0" => "__builtin_HEXAGON_S2_cl0",
+    "llvm.hexagon.S2.cl0p" => "__builtin_HEXAGON_S2_cl0p",
+    "llvm.hexagon.S2.cl1" => "__builtin_HEXAGON_S2_cl1",
+    "llvm.hexagon.S2.cl1p" => "__builtin_HEXAGON_S2_cl1p",
+    "llvm.hexagon.S2.clb" => "__builtin_HEXAGON_S2_clb",
+    "llvm.hexagon.S2.clbnorm" => "__builtin_HEXAGON_S2_clbnorm",
+    "llvm.hexagon.S2.clbp" => "__builtin_HEXAGON_S2_clbp",
+    "llvm.hexagon.S2.clrbit.i" => "__builtin_HEXAGON_S2_clrbit_i",
+    "llvm.hexagon.S2.clrbit.r" => "__builtin_HEXAGON_S2_clrbit_r",
+    "llvm.hexagon.S2.ct0" => "__builtin_HEXAGON_S2_ct0",
+    "llvm.hexagon.S2.ct0p" => "__builtin_HEXAGON_S2_ct0p",
+    "llvm.hexagon.S2.ct1" => "__builtin_HEXAGON_S2_ct1",
+    "llvm.hexagon.S2.ct1p" => "__builtin_HEXAGON_S2_ct1p",
+    "llvm.hexagon.S2.deinterleave" => "__builtin_HEXAGON_S2_deinterleave",
+    "llvm.hexagon.S2.extractu" => "__builtin_HEXAGON_S2_extractu",
+    "llvm.hexagon.S2.extractu.rp" => "__builtin_HEXAGON_S2_extractu_rp",
+    "llvm.hexagon.S2.extractup" => "__builtin_HEXAGON_S2_extractup",
+    "llvm.hexagon.S2.extractup.rp" => "__builtin_HEXAGON_S2_extractup_rp",
+    "llvm.hexagon.S2.insert" => "__builtin_HEXAGON_S2_insert",
+    "llvm.hexagon.S2.insert.rp" => "__builtin_HEXAGON_S2_insert_rp",
+    "llvm.hexagon.S2.insertp" => "__builtin_HEXAGON_S2_insertp",
+    "llvm.hexagon.S2.insertp.rp" => "__builtin_HEXAGON_S2_insertp_rp",
+    "llvm.hexagon.S2.interleave" => "__builtin_HEXAGON_S2_interleave",
+    "llvm.hexagon.S2.lfsp" => "__builtin_HEXAGON_S2_lfsp",
+    "llvm.hexagon.S2.lsl.r.p" => "__builtin_HEXAGON_S2_lsl_r_p",
+    "llvm.hexagon.S2.lsl.r.p.acc" => "__builtin_HEXAGON_S2_lsl_r_p_acc",
+    "llvm.hexagon.S2.lsl.r.p.and" => "__builtin_HEXAGON_S2_lsl_r_p_and",
+    "llvm.hexagon.S2.lsl.r.p.nac" => "__builtin_HEXAGON_S2_lsl_r_p_nac",
+    "llvm.hexagon.S2.lsl.r.p.or" => "__builtin_HEXAGON_S2_lsl_r_p_or",
+    "llvm.hexagon.S2.lsl.r.p.xor" => "__builtin_HEXAGON_S2_lsl_r_p_xor",
+    "llvm.hexagon.S2.lsl.r.r" => "__builtin_HEXAGON_S2_lsl_r_r",
+    "llvm.hexagon.S2.lsl.r.r.acc" => "__builtin_HEXAGON_S2_lsl_r_r_acc",
+    "llvm.hexagon.S2.lsl.r.r.and" => "__builtin_HEXAGON_S2_lsl_r_r_and",
+    "llvm.hexagon.S2.lsl.r.r.nac" => "__builtin_HEXAGON_S2_lsl_r_r_nac",
+    "llvm.hexagon.S2.lsl.r.r.or" => "__builtin_HEXAGON_S2_lsl_r_r_or",
+    "llvm.hexagon.S2.lsl.r.vh" => "__builtin_HEXAGON_S2_lsl_r_vh",
+    "llvm.hexagon.S2.lsl.r.vw" => "__builtin_HEXAGON_S2_lsl_r_vw",
+    "llvm.hexagon.S2.lsr.i.p" => "__builtin_HEXAGON_S2_lsr_i_p",
+    "llvm.hexagon.S2.lsr.i.p.acc" => "__builtin_HEXAGON_S2_lsr_i_p_acc",
+    "llvm.hexagon.S2.lsr.i.p.and" => "__builtin_HEXAGON_S2_lsr_i_p_and",
+    "llvm.hexagon.S2.lsr.i.p.nac" => "__builtin_HEXAGON_S2_lsr_i_p_nac",
+    "llvm.hexagon.S2.lsr.i.p.or" => "__builtin_HEXAGON_S2_lsr_i_p_or",
+    "llvm.hexagon.S2.lsr.i.p.xacc" => "__builtin_HEXAGON_S2_lsr_i_p_xacc",
+    "llvm.hexagon.S2.lsr.i.r" => "__builtin_HEXAGON_S2_lsr_i_r",
+    "llvm.hexagon.S2.lsr.i.r.acc" => "__builtin_HEXAGON_S2_lsr_i_r_acc",
+    "llvm.hexagon.S2.lsr.i.r.and" => "__builtin_HEXAGON_S2_lsr_i_r_and",
+    "llvm.hexagon.S2.lsr.i.r.nac" => "__builtin_HEXAGON_S2_lsr_i_r_nac",
+    "llvm.hexagon.S2.lsr.i.r.or" => "__builtin_HEXAGON_S2_lsr_i_r_or",
+    "llvm.hexagon.S2.lsr.i.r.xacc" => "__builtin_HEXAGON_S2_lsr_i_r_xacc",
+    "llvm.hexagon.S2.lsr.i.vh" => "__builtin_HEXAGON_S2_lsr_i_vh",
+    "llvm.hexagon.S2.lsr.i.vw" => "__builtin_HEXAGON_S2_lsr_i_vw",
+    "llvm.hexagon.S2.lsr.r.p" => "__builtin_HEXAGON_S2_lsr_r_p",
+    "llvm.hexagon.S2.lsr.r.p.acc" => "__builtin_HEXAGON_S2_lsr_r_p_acc",
+    "llvm.hexagon.S2.lsr.r.p.and" => "__builtin_HEXAGON_S2_lsr_r_p_and",
+    "llvm.hexagon.S2.lsr.r.p.nac" => "__builtin_HEXAGON_S2_lsr_r_p_nac",
+    "llvm.hexagon.S2.lsr.r.p.or" => "__builtin_HEXAGON_S2_lsr_r_p_or",
+    "llvm.hexagon.S2.lsr.r.p.xor" => "__builtin_HEXAGON_S2_lsr_r_p_xor",
+    "llvm.hexagon.S2.lsr.r.r" => "__builtin_HEXAGON_S2_lsr_r_r",
+    "llvm.hexagon.S2.lsr.r.r.acc" => "__builtin_HEXAGON_S2_lsr_r_r_acc",
+    "llvm.hexagon.S2.lsr.r.r.and" => "__builtin_HEXAGON_S2_lsr_r_r_and",
+    "llvm.hexagon.S2.lsr.r.r.nac" => "__builtin_HEXAGON_S2_lsr_r_r_nac",
+    "llvm.hexagon.S2.lsr.r.r.or" => "__builtin_HEXAGON_S2_lsr_r_r_or",
+    "llvm.hexagon.S2.lsr.r.vh" => "__builtin_HEXAGON_S2_lsr_r_vh",
+    "llvm.hexagon.S2.lsr.r.vw" => "__builtin_HEXAGON_S2_lsr_r_vw",
+    "llvm.hexagon.S2.mask" => "__builtin_HEXAGON_S2_mask",
+    "llvm.hexagon.S2.packhl" => "__builtin_HEXAGON_S2_packhl",
+    "llvm.hexagon.S2.parityp" => "__builtin_HEXAGON_S2_parityp",
+    "llvm.hexagon.S2.setbit.i" => "__builtin_HEXAGON_S2_setbit_i",
+    "llvm.hexagon.S2.setbit.r" => "__builtin_HEXAGON_S2_setbit_r",
+    "llvm.hexagon.S2.shuffeb" => "__builtin_HEXAGON_S2_shuffeb",
+    "llvm.hexagon.S2.shuffeh" => "__builtin_HEXAGON_S2_shuffeh",
+    "llvm.hexagon.S2.shuffob" => "__builtin_HEXAGON_S2_shuffob",
+    "llvm.hexagon.S2.shuffoh" => "__builtin_HEXAGON_S2_shuffoh",
+    "llvm.hexagon.S2.storerb.pbr" => "__builtin_brev_stb",
+    "llvm.hexagon.S2.storerd.pbr" => "__builtin_brev_std",
+    "llvm.hexagon.S2.storerf.pbr" => "__builtin_brev_sthhi",
+    "llvm.hexagon.S2.storerh.pbr" => "__builtin_brev_sth",
+    "llvm.hexagon.S2.storeri.pbr" => "__builtin_brev_stw",
+    "llvm.hexagon.S2.storew.locked" => "__builtin_HEXAGON_S2_storew_locked",
+    "llvm.hexagon.S2.svsathb" => "__builtin_HEXAGON_S2_svsathb",
+    "llvm.hexagon.S2.svsathub" => "__builtin_HEXAGON_S2_svsathub",
+    "llvm.hexagon.S2.tableidxb.goodsyntax" => "__builtin_HEXAGON_S2_tableidxb_goodsyntax",
+    "llvm.hexagon.S2.tableidxd.goodsyntax" => "__builtin_HEXAGON_S2_tableidxd_goodsyntax",
+    "llvm.hexagon.S2.tableidxh.goodsyntax" => "__builtin_HEXAGON_S2_tableidxh_goodsyntax",
+    "llvm.hexagon.S2.tableidxw.goodsyntax" => "__builtin_HEXAGON_S2_tableidxw_goodsyntax",
+    "llvm.hexagon.S2.togglebit.i" => "__builtin_HEXAGON_S2_togglebit_i",
+    "llvm.hexagon.S2.togglebit.r" => "__builtin_HEXAGON_S2_togglebit_r",
+    "llvm.hexagon.S2.tstbit.i" => "__builtin_HEXAGON_S2_tstbit_i",
+    "llvm.hexagon.S2.tstbit.r" => "__builtin_HEXAGON_S2_tstbit_r",
+    "llvm.hexagon.S2.valignib" => "__builtin_HEXAGON_S2_valignib",
+    "llvm.hexagon.S2.valignrb" => "__builtin_HEXAGON_S2_valignrb",
+    "llvm.hexagon.S2.vcnegh" => "__builtin_HEXAGON_S2_vcnegh",
+    "llvm.hexagon.S2.vcrotate" => "__builtin_HEXAGON_S2_vcrotate",
+    "llvm.hexagon.S2.vrcnegh" => "__builtin_HEXAGON_S2_vrcnegh",
+    "llvm.hexagon.S2.vrndpackwh" => "__builtin_HEXAGON_S2_vrndpackwh",
+    "llvm.hexagon.S2.vrndpackwhs" => "__builtin_HEXAGON_S2_vrndpackwhs",
+    "llvm.hexagon.S2.vsathb" => "__builtin_HEXAGON_S2_vsathb",
+    "llvm.hexagon.S2.vsathb.nopack" => "__builtin_HEXAGON_S2_vsathb_nopack",
+    "llvm.hexagon.S2.vsathub" => "__builtin_HEXAGON_S2_vsathub",
+    "llvm.hexagon.S2.vsathub.nopack" => "__builtin_HEXAGON_S2_vsathub_nopack",
+    "llvm.hexagon.S2.vsatwh" => "__builtin_HEXAGON_S2_vsatwh",
+    "llvm.hexagon.S2.vsatwh.nopack" => "__builtin_HEXAGON_S2_vsatwh_nopack",
+    "llvm.hexagon.S2.vsatwuh" => "__builtin_HEXAGON_S2_vsatwuh",
+    "llvm.hexagon.S2.vsatwuh.nopack" => "__builtin_HEXAGON_S2_vsatwuh_nopack",
+    "llvm.hexagon.S2.vsplatrb" => "__builtin_HEXAGON_S2_vsplatrb",
+    "llvm.hexagon.S2.vsplatrh" => "__builtin_HEXAGON_S2_vsplatrh",
+    "llvm.hexagon.S2.vspliceib" => "__builtin_HEXAGON_S2_vspliceib",
+    "llvm.hexagon.S2.vsplicerb" => "__builtin_HEXAGON_S2_vsplicerb",
+    "llvm.hexagon.S2.vsxtbh" => "__builtin_HEXAGON_S2_vsxtbh",
+    "llvm.hexagon.S2.vsxthw" => "__builtin_HEXAGON_S2_vsxthw",
+    "llvm.hexagon.S2.vtrunehb" => "__builtin_HEXAGON_S2_vtrunehb",
+    "llvm.hexagon.S2.vtrunewh" => "__builtin_HEXAGON_S2_vtrunewh",
+    "llvm.hexagon.S2.vtrunohb" => "__builtin_HEXAGON_S2_vtrunohb",
+    "llvm.hexagon.S2.vtrunowh" => "__builtin_HEXAGON_S2_vtrunowh",
+    "llvm.hexagon.S2.vzxtbh" => "__builtin_HEXAGON_S2_vzxtbh",
+    "llvm.hexagon.S2.vzxthw" => "__builtin_HEXAGON_S2_vzxthw",
+    "llvm.hexagon.S4.addaddi" => "__builtin_HEXAGON_S4_addaddi",
+    "llvm.hexagon.S4.addi.asl.ri" => "__builtin_HEXAGON_S4_addi_asl_ri",
+    "llvm.hexagon.S4.addi.lsr.ri" => "__builtin_HEXAGON_S4_addi_lsr_ri",
+    "llvm.hexagon.S4.andi.asl.ri" => "__builtin_HEXAGON_S4_andi_asl_ri",
+    "llvm.hexagon.S4.andi.lsr.ri" => "__builtin_HEXAGON_S4_andi_lsr_ri",
+    "llvm.hexagon.S4.clbaddi" => "__builtin_HEXAGON_S4_clbaddi",
+    "llvm.hexagon.S4.clbpaddi" => "__builtin_HEXAGON_S4_clbpaddi",
+    "llvm.hexagon.S4.clbpnorm" => "__builtin_HEXAGON_S4_clbpnorm",
+    "llvm.hexagon.S4.extract" => "__builtin_HEXAGON_S4_extract",
+    "llvm.hexagon.S4.extract.rp" => "__builtin_HEXAGON_S4_extract_rp",
+    "llvm.hexagon.S4.extractp" => "__builtin_HEXAGON_S4_extractp",
+    "llvm.hexagon.S4.extractp.rp" => "__builtin_HEXAGON_S4_extractp_rp",
+    "llvm.hexagon.S4.lsli" => "__builtin_HEXAGON_S4_lsli",
+    "llvm.hexagon.S4.ntstbit.i" => "__builtin_HEXAGON_S4_ntstbit_i",
+    "llvm.hexagon.S4.ntstbit.r" => "__builtin_HEXAGON_S4_ntstbit_r",
+    "llvm.hexagon.S4.or.andi" => "__builtin_HEXAGON_S4_or_andi",
+    "llvm.hexagon.S4.or.andix" => "__builtin_HEXAGON_S4_or_andix",
+    "llvm.hexagon.S4.or.ori" => "__builtin_HEXAGON_S4_or_ori",
+    "llvm.hexagon.S4.ori.asl.ri" => "__builtin_HEXAGON_S4_ori_asl_ri",
+    "llvm.hexagon.S4.ori.lsr.ri" => "__builtin_HEXAGON_S4_ori_lsr_ri",
+    "llvm.hexagon.S4.parity" => "__builtin_HEXAGON_S4_parity",
+    "llvm.hexagon.S4.stored.locked" => "__builtin_HEXAGON_S4_stored_locked",
+    "llvm.hexagon.S4.subaddi" => "__builtin_HEXAGON_S4_subaddi",
+    "llvm.hexagon.S4.subi.asl.ri" => "__builtin_HEXAGON_S4_subi_asl_ri",
+    "llvm.hexagon.S4.subi.lsr.ri" => "__builtin_HEXAGON_S4_subi_lsr_ri",
+    "llvm.hexagon.S4.vrcrotate" => "__builtin_HEXAGON_S4_vrcrotate",
+    "llvm.hexagon.S4.vrcrotate.acc" => "__builtin_HEXAGON_S4_vrcrotate_acc",
+    "llvm.hexagon.S4.vxaddsubh" => "__builtin_HEXAGON_S4_vxaddsubh",
+    "llvm.hexagon.S4.vxaddsubhr" => "__builtin_HEXAGON_S4_vxaddsubhr",
+    "llvm.hexagon.S4.vxaddsubw" => "__builtin_HEXAGON_S4_vxaddsubw",
+    "llvm.hexagon.S4.vxsubaddh" => "__builtin_HEXAGON_S4_vxsubaddh",
+    "llvm.hexagon.S4.vxsubaddhr" => "__builtin_HEXAGON_S4_vxsubaddhr",
+    "llvm.hexagon.S4.vxsubaddw" => "__builtin_HEXAGON_S4_vxsubaddw",
+    "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax" => "__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax",
+    "llvm.hexagon.S5.asrhub.sat" => "__builtin_HEXAGON_S5_asrhub_sat",
+    "llvm.hexagon.S5.popcountp" => "__builtin_HEXAGON_S5_popcountp",
+    "llvm.hexagon.S5.vasrhrnd.goodsyntax" => "__builtin_HEXAGON_S5_vasrhrnd_goodsyntax",
+    "llvm.hexagon.S6.rol.i.p" => "__builtin_HEXAGON_S6_rol_i_p",
+    "llvm.hexagon.S6.rol.i.p.acc" => "__builtin_HEXAGON_S6_rol_i_p_acc",
+    "llvm.hexagon.S6.rol.i.p.and" => "__builtin_HEXAGON_S6_rol_i_p_and",
+    "llvm.hexagon.S6.rol.i.p.nac" => "__builtin_HEXAGON_S6_rol_i_p_nac",
+    "llvm.hexagon.S6.rol.i.p.or" => "__builtin_HEXAGON_S6_rol_i_p_or",
+    "llvm.hexagon.S6.rol.i.p.xacc" => "__builtin_HEXAGON_S6_rol_i_p_xacc",
+    "llvm.hexagon.S6.rol.i.r" => "__builtin_HEXAGON_S6_rol_i_r",
+    "llvm.hexagon.S6.rol.i.r.acc" => "__builtin_HEXAGON_S6_rol_i_r_acc",
+    "llvm.hexagon.S6.rol.i.r.and" => "__builtin_HEXAGON_S6_rol_i_r_and",
+    "llvm.hexagon.S6.rol.i.r.nac" => "__builtin_HEXAGON_S6_rol_i_r_nac",
+    "llvm.hexagon.S6.rol.i.r.or" => "__builtin_HEXAGON_S6_rol_i_r_or",
+    "llvm.hexagon.S6.rol.i.r.xacc" => "__builtin_HEXAGON_S6_rol_i_r_xacc",
+    "llvm.hexagon.S6.vsplatrbp" => "__builtin_HEXAGON_S6_vsplatrbp",
+    "llvm.hexagon.S6.vtrunehb.ppp" => "__builtin_HEXAGON_S6_vtrunehb_ppp",
+    "llvm.hexagon.S6.vtrunohb.ppp" => "__builtin_HEXAGON_S6_vtrunohb_ppp",
+    "llvm.hexagon.SI.to.SXTHI.asrh" => "__builtin_SI_to_SXTHI_asrh",
+    "llvm.hexagon.V6.extractw" => "__builtin_HEXAGON_V6_extractw",
+    "llvm.hexagon.V6.extractw.128B" => "__builtin_HEXAGON_V6_extractw_128B",
+    "llvm.hexagon.V6.hi" => "__builtin_HEXAGON_V6_hi",
+    "llvm.hexagon.V6.hi.128B" => "__builtin_HEXAGON_V6_hi_128B",
+    "llvm.hexagon.V6.lo" => "__builtin_HEXAGON_V6_lo",
+    "llvm.hexagon.V6.lo.128B" => "__builtin_HEXAGON_V6_lo_128B",
+    "llvm.hexagon.V6.lvsplatb" => "__builtin_HEXAGON_V6_lvsplatb",
+    "llvm.hexagon.V6.lvsplatb.128B" => "__builtin_HEXAGON_V6_lvsplatb_128B",
+    "llvm.hexagon.V6.lvsplath" => "__builtin_HEXAGON_V6_lvsplath",
+    "llvm.hexagon.V6.lvsplath.128B" => "__builtin_HEXAGON_V6_lvsplath_128B",
+    "llvm.hexagon.V6.lvsplatw" => "__builtin_HEXAGON_V6_lvsplatw",
+    "llvm.hexagon.V6.lvsplatw.128B" => "__builtin_HEXAGON_V6_lvsplatw_128B",
+    "llvm.hexagon.V6.pred.and" => "__builtin_HEXAGON_V6_pred_and",
+    "llvm.hexagon.V6.pred.and.128B" => "__builtin_HEXAGON_V6_pred_and_128B",
+    "llvm.hexagon.V6.pred.and.n" => "__builtin_HEXAGON_V6_pred_and_n",
+    "llvm.hexagon.V6.pred.and.n.128B" => "__builtin_HEXAGON_V6_pred_and_n_128B",
+    "llvm.hexagon.V6.pred.not" => "__builtin_HEXAGON_V6_pred_not",
+    "llvm.hexagon.V6.pred.not.128B" => "__builtin_HEXAGON_V6_pred_not_128B",
+    "llvm.hexagon.V6.pred.or" => "__builtin_HEXAGON_V6_pred_or",
+    "llvm.hexagon.V6.pred.or.128B" => "__builtin_HEXAGON_V6_pred_or_128B",
+    "llvm.hexagon.V6.pred.or.n" => "__builtin_HEXAGON_V6_pred_or_n",
+    "llvm.hexagon.V6.pred.or.n.128B" => "__builtin_HEXAGON_V6_pred_or_n_128B",
+    "llvm.hexagon.V6.pred.scalar2" => "__builtin_HEXAGON_V6_pred_scalar2",
+    "llvm.hexagon.V6.pred.scalar2.128B" => "__builtin_HEXAGON_V6_pred_scalar2_128B",
+    "llvm.hexagon.V6.pred.scalar2v2" => "__builtin_HEXAGON_V6_pred_scalar2v2",
+    "llvm.hexagon.V6.pred.scalar2v2.128B" => "__builtin_HEXAGON_V6_pred_scalar2v2_128B",
+    "llvm.hexagon.V6.pred.xor" => "__builtin_HEXAGON_V6_pred_xor",
+    "llvm.hexagon.V6.pred.xor.128B" => "__builtin_HEXAGON_V6_pred_xor_128B",
+    "llvm.hexagon.V6.shuffeqh" => "__builtin_HEXAGON_V6_shuffeqh",
+    "llvm.hexagon.V6.shuffeqh.128B" => "__builtin_HEXAGON_V6_shuffeqh_128B",
+    "llvm.hexagon.V6.shuffeqw" => "__builtin_HEXAGON_V6_shuffeqw",
+    "llvm.hexagon.V6.shuffeqw.128B" => "__builtin_HEXAGON_V6_shuffeqw_128B",
+    "llvm.hexagon.V6.v6mpyhubs10" => "__builtin_HEXAGON_V6_v6mpyhubs10",
+    "llvm.hexagon.V6.v6mpyhubs10.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_128B",
+    "llvm.hexagon.V6.v6mpyhubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx",
+    "llvm.hexagon.V6.v6mpyhubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B",
+    "llvm.hexagon.V6.v6mpyvubs10" => "__builtin_HEXAGON_V6_v6mpyvubs10",
+    "llvm.hexagon.V6.v6mpyvubs10.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_128B",
+    "llvm.hexagon.V6.v6mpyvubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx",
+    "llvm.hexagon.V6.v6mpyvubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B",
+    "llvm.hexagon.V6.vS32b.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai",
+    "llvm.hexagon.V6.vS32b.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B",
+    "llvm.hexagon.V6.vS32b.nt.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai",
+    "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B",
+    "llvm.hexagon.V6.vS32b.nt.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai",
+    "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B",
+    "llvm.hexagon.V6.vS32b.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_qpred_ai",
+    "llvm.hexagon.V6.vS32b.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_qpred_ai_128B",
+    "llvm.hexagon.V6.vabs.hf" => "__builtin_HEXAGON_V6_vabs_hf",
+    "llvm.hexagon.V6.vabs.hf.128B" => "__builtin_HEXAGON_V6_vabs_hf_128B",
+    "llvm.hexagon.V6.vabs.sf" => "__builtin_HEXAGON_V6_vabs_sf",
+    "llvm.hexagon.V6.vabs.sf.128B" => "__builtin_HEXAGON_V6_vabs_sf_128B",
+    "llvm.hexagon.V6.vabsb" => "__builtin_HEXAGON_V6_vabsb",
+    "llvm.hexagon.V6.vabsb.128B" => "__builtin_HEXAGON_V6_vabsb_128B",
+    "llvm.hexagon.V6.vabsb.sat" => "__builtin_HEXAGON_V6_vabsb_sat",
+    "llvm.hexagon.V6.vabsb.sat.128B" => "__builtin_HEXAGON_V6_vabsb_sat_128B",
+    "llvm.hexagon.V6.vabsdiffh" => "__builtin_HEXAGON_V6_vabsdiffh",
+    "llvm.hexagon.V6.vabsdiffh.128B" => "__builtin_HEXAGON_V6_vabsdiffh_128B",
+    "llvm.hexagon.V6.vabsdiffub" => "__builtin_HEXAGON_V6_vabsdiffub",
+    "llvm.hexagon.V6.vabsdiffub.128B" => "__builtin_HEXAGON_V6_vabsdiffub_128B",
+    "llvm.hexagon.V6.vabsdiffuh" => "__builtin_HEXAGON_V6_vabsdiffuh",
+    "llvm.hexagon.V6.vabsdiffuh.128B" => "__builtin_HEXAGON_V6_vabsdiffuh_128B",
+    "llvm.hexagon.V6.vabsdiffw" => "__builtin_HEXAGON_V6_vabsdiffw",
+    "llvm.hexagon.V6.vabsdiffw.128B" => "__builtin_HEXAGON_V6_vabsdiffw_128B",
+    "llvm.hexagon.V6.vabsh" => "__builtin_HEXAGON_V6_vabsh",
+    "llvm.hexagon.V6.vabsh.128B" => "__builtin_HEXAGON_V6_vabsh_128B",
+    "llvm.hexagon.V6.vabsh.sat" => "__builtin_HEXAGON_V6_vabsh_sat",
+    "llvm.hexagon.V6.vabsh.sat.128B" => "__builtin_HEXAGON_V6_vabsh_sat_128B",
+    "llvm.hexagon.V6.vabsw" => "__builtin_HEXAGON_V6_vabsw",
+    "llvm.hexagon.V6.vabsw.128B" => "__builtin_HEXAGON_V6_vabsw_128B",
+    "llvm.hexagon.V6.vabsw.sat" => "__builtin_HEXAGON_V6_vabsw_sat",
+    "llvm.hexagon.V6.vabsw.sat.128B" => "__builtin_HEXAGON_V6_vabsw_sat_128B",
+    "llvm.hexagon.V6.vadd.hf" => "__builtin_HEXAGON_V6_vadd_hf",
+    "llvm.hexagon.V6.vadd.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_128B",
+    "llvm.hexagon.V6.vadd.hf.hf" => "__builtin_HEXAGON_V6_vadd_hf_hf",
+    "llvm.hexagon.V6.vadd.hf.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_hf_128B",
+    "llvm.hexagon.V6.vadd.qf16" => "__builtin_HEXAGON_V6_vadd_qf16",
+    "llvm.hexagon.V6.vadd.qf16.128B" => "__builtin_HEXAGON_V6_vadd_qf16_128B",
+    "llvm.hexagon.V6.vadd.qf16.mix" => "__builtin_HEXAGON_V6_vadd_qf16_mix",
+    "llvm.hexagon.V6.vadd.qf16.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf16_mix_128B",
+    "llvm.hexagon.V6.vadd.qf32" => "__builtin_HEXAGON_V6_vadd_qf32",
+    "llvm.hexagon.V6.vadd.qf32.128B" => "__builtin_HEXAGON_V6_vadd_qf32_128B",
+    "llvm.hexagon.V6.vadd.qf32.mix" => "__builtin_HEXAGON_V6_vadd_qf32_mix",
+    "llvm.hexagon.V6.vadd.qf32.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf32_mix_128B",
+    "llvm.hexagon.V6.vadd.sf" => "__builtin_HEXAGON_V6_vadd_sf",
+    "llvm.hexagon.V6.vadd.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_128B",
+    "llvm.hexagon.V6.vadd.sf.bf" => "__builtin_HEXAGON_V6_vadd_sf_bf",
+    "llvm.hexagon.V6.vadd.sf.bf.128B" => "__builtin_HEXAGON_V6_vadd_sf_bf_128B",
+    "llvm.hexagon.V6.vadd.sf.hf" => "__builtin_HEXAGON_V6_vadd_sf_hf",
+    "llvm.hexagon.V6.vadd.sf.hf.128B" => "__builtin_HEXAGON_V6_vadd_sf_hf_128B",
+    "llvm.hexagon.V6.vadd.sf.sf" => "__builtin_HEXAGON_V6_vadd_sf_sf",
+    "llvm.hexagon.V6.vadd.sf.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_sf_128B",
+    "llvm.hexagon.V6.vaddb" => "__builtin_HEXAGON_V6_vaddb",
+    "llvm.hexagon.V6.vaddb.128B" => "__builtin_HEXAGON_V6_vaddb_128B",
+    "llvm.hexagon.V6.vaddb.dv" => "__builtin_HEXAGON_V6_vaddb_dv",
+    "llvm.hexagon.V6.vaddb.dv.128B" => "__builtin_HEXAGON_V6_vaddb_dv_128B",
+    "llvm.hexagon.V6.vaddbnq" => "__builtin_HEXAGON_V6_vaddbnq",
+    "llvm.hexagon.V6.vaddbnq.128B" => "__builtin_HEXAGON_V6_vaddbnq_128B",
+    "llvm.hexagon.V6.vaddbq" => "__builtin_HEXAGON_V6_vaddbq",
+    "llvm.hexagon.V6.vaddbq.128B" => "__builtin_HEXAGON_V6_vaddbq_128B",
+    "llvm.hexagon.V6.vaddbsat" => "__builtin_HEXAGON_V6_vaddbsat",
+    "llvm.hexagon.V6.vaddbsat.128B" => "__builtin_HEXAGON_V6_vaddbsat_128B",
+    "llvm.hexagon.V6.vaddbsat.dv" => "__builtin_HEXAGON_V6_vaddbsat_dv",
+    "llvm.hexagon.V6.vaddbsat.dv.128B" => "__builtin_HEXAGON_V6_vaddbsat_dv_128B",
+    "llvm.hexagon.V6.vaddcarrysat" => "__builtin_HEXAGON_V6_vaddcarrysat",
+    "llvm.hexagon.V6.vaddcarrysat.128B" => "__builtin_HEXAGON_V6_vaddcarrysat_128B",
+    "llvm.hexagon.V6.vaddclbh" => "__builtin_HEXAGON_V6_vaddclbh",
+    "llvm.hexagon.V6.vaddclbh.128B" => "__builtin_HEXAGON_V6_vaddclbh_128B",
+    "llvm.hexagon.V6.vaddclbw" => "__builtin_HEXAGON_V6_vaddclbw",
+    "llvm.hexagon.V6.vaddclbw.128B" => "__builtin_HEXAGON_V6_vaddclbw_128B",
+    "llvm.hexagon.V6.vaddh" => "__builtin_HEXAGON_V6_vaddh",
+    "llvm.hexagon.V6.vaddh.128B" => "__builtin_HEXAGON_V6_vaddh_128B",
+    "llvm.hexagon.V6.vaddh.dv" => "__builtin_HEXAGON_V6_vaddh_dv",
+    "llvm.hexagon.V6.vaddh.dv.128B" => "__builtin_HEXAGON_V6_vaddh_dv_128B",
+    "llvm.hexagon.V6.vaddhnq" => "__builtin_HEXAGON_V6_vaddhnq",
+    "llvm.hexagon.V6.vaddhnq.128B" => "__builtin_HEXAGON_V6_vaddhnq_128B",
+    "llvm.hexagon.V6.vaddhq" => "__builtin_HEXAGON_V6_vaddhq",
+    "llvm.hexagon.V6.vaddhq.128B" => "__builtin_HEXAGON_V6_vaddhq_128B",
+    "llvm.hexagon.V6.vaddhsat" => "__builtin_HEXAGON_V6_vaddhsat",
+    "llvm.hexagon.V6.vaddhsat.128B" => "__builtin_HEXAGON_V6_vaddhsat_128B",
+    "llvm.hexagon.V6.vaddhsat.dv" => "__builtin_HEXAGON_V6_vaddhsat_dv",
+    "llvm.hexagon.V6.vaddhsat.dv.128B" => "__builtin_HEXAGON_V6_vaddhsat_dv_128B",
+    "llvm.hexagon.V6.vaddhw" => "__builtin_HEXAGON_V6_vaddhw",
+    "llvm.hexagon.V6.vaddhw.128B" => "__builtin_HEXAGON_V6_vaddhw_128B",
+    "llvm.hexagon.V6.vaddhw.acc" => "__builtin_HEXAGON_V6_vaddhw_acc",
+    "llvm.hexagon.V6.vaddhw.acc.128B" => "__builtin_HEXAGON_V6_vaddhw_acc_128B",
+    "llvm.hexagon.V6.vaddubh" => "__builtin_HEXAGON_V6_vaddubh",
+    "llvm.hexagon.V6.vaddubh.128B" => "__builtin_HEXAGON_V6_vaddubh_128B",
+    "llvm.hexagon.V6.vaddubh.acc" => "__builtin_HEXAGON_V6_vaddubh_acc",
+    "llvm.hexagon.V6.vaddubh.acc.128B" => "__builtin_HEXAGON_V6_vaddubh_acc_128B",
+    "llvm.hexagon.V6.vaddubsat" => "__builtin_HEXAGON_V6_vaddubsat",
+    "llvm.hexagon.V6.vaddubsat.128B" => "__builtin_HEXAGON_V6_vaddubsat_128B",
+    "llvm.hexagon.V6.vaddubsat.dv" => "__builtin_HEXAGON_V6_vaddubsat_dv",
+    "llvm.hexagon.V6.vaddubsat.dv.128B" => "__builtin_HEXAGON_V6_vaddubsat_dv_128B",
+    "llvm.hexagon.V6.vaddububb.sat" => "__builtin_HEXAGON_V6_vaddububb_sat",
+    "llvm.hexagon.V6.vaddububb.sat.128B" => "__builtin_HEXAGON_V6_vaddububb_sat_128B",
+    "llvm.hexagon.V6.vadduhsat" => "__builtin_HEXAGON_V6_vadduhsat",
+    "llvm.hexagon.V6.vadduhsat.128B" => "__builtin_HEXAGON_V6_vadduhsat_128B",
+    "llvm.hexagon.V6.vadduhsat.dv" => "__builtin_HEXAGON_V6_vadduhsat_dv",
+    "llvm.hexagon.V6.vadduhsat.dv.128B" => "__builtin_HEXAGON_V6_vadduhsat_dv_128B",
+    "llvm.hexagon.V6.vadduhw" => "__builtin_HEXAGON_V6_vadduhw",
+    "llvm.hexagon.V6.vadduhw.128B" => "__builtin_HEXAGON_V6_vadduhw_128B",
+    "llvm.hexagon.V6.vadduhw.acc" => "__builtin_HEXAGON_V6_vadduhw_acc",
+    "llvm.hexagon.V6.vadduhw.acc.128B" => "__builtin_HEXAGON_V6_vadduhw_acc_128B",
+    "llvm.hexagon.V6.vadduwsat" => "__builtin_HEXAGON_V6_vadduwsat",
+    "llvm.hexagon.V6.vadduwsat.128B" => "__builtin_HEXAGON_V6_vadduwsat_128B",
+    "llvm.hexagon.V6.vadduwsat.dv" => "__builtin_HEXAGON_V6_vadduwsat_dv",
+    "llvm.hexagon.V6.vadduwsat.dv.128B" => "__builtin_HEXAGON_V6_vadduwsat_dv_128B",
+    "llvm.hexagon.V6.vaddw" => "__builtin_HEXAGON_V6_vaddw",
+    "llvm.hexagon.V6.vaddw.128B" => "__builtin_HEXAGON_V6_vaddw_128B",
+    "llvm.hexagon.V6.vaddw.dv" => "__builtin_HEXAGON_V6_vaddw_dv",
+    "llvm.hexagon.V6.vaddw.dv.128B" => "__builtin_HEXAGON_V6_vaddw_dv_128B",
+    "llvm.hexagon.V6.vaddwnq" => "__builtin_HEXAGON_V6_vaddwnq",
+    "llvm.hexagon.V6.vaddwnq.128B" => "__builtin_HEXAGON_V6_vaddwnq_128B",
+    "llvm.hexagon.V6.vaddwq" => "__builtin_HEXAGON_V6_vaddwq",
+    "llvm.hexagon.V6.vaddwq.128B" => "__builtin_HEXAGON_V6_vaddwq_128B",
+    "llvm.hexagon.V6.vaddwsat" => "__builtin_HEXAGON_V6_vaddwsat",
+    "llvm.hexagon.V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B",
+    "llvm.hexagon.V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv",
+    "llvm.hexagon.V6.vaddwsat.dv.128B" => "__builtin_HEXAGON_V6_vaddwsat_dv_128B",
+    "llvm.hexagon.V6.valignb" => "__builtin_HEXAGON_V6_valignb",
+    "llvm.hexagon.V6.valignb.128B" => "__builtin_HEXAGON_V6_valignb_128B",
+    "llvm.hexagon.V6.valignbi" => "__builtin_HEXAGON_V6_valignbi",
+    "llvm.hexagon.V6.valignbi.128B" => "__builtin_HEXAGON_V6_valignbi_128B",
+    "llvm.hexagon.V6.vand" => "__builtin_HEXAGON_V6_vand",
+    "llvm.hexagon.V6.vand.128B" => "__builtin_HEXAGON_V6_vand_128B",
+    "llvm.hexagon.V6.vandnqrt" => "__builtin_HEXAGON_V6_vandnqrt",
+    "llvm.hexagon.V6.vandnqrt.128B" => "__builtin_HEXAGON_V6_vandnqrt_128B",
+    "llvm.hexagon.V6.vandnqrt.acc" => "__builtin_HEXAGON_V6_vandnqrt_acc",
+    "llvm.hexagon.V6.vandnqrt.acc.128B" => "__builtin_HEXAGON_V6_vandnqrt_acc_128B",
+    "llvm.hexagon.V6.vandqrt" => "__builtin_HEXAGON_V6_vandqrt",
+    "llvm.hexagon.V6.vandqrt.128B" => "__builtin_HEXAGON_V6_vandqrt_128B",
+    "llvm.hexagon.V6.vandqrt.acc" => "__builtin_HEXAGON_V6_vandqrt_acc",
+    "llvm.hexagon.V6.vandqrt.acc.128B" => "__builtin_HEXAGON_V6_vandqrt_acc_128B",
+    "llvm.hexagon.V6.vandvnqv" => "__builtin_HEXAGON_V6_vandvnqv",
+    "llvm.hexagon.V6.vandvnqv.128B" => "__builtin_HEXAGON_V6_vandvnqv_128B",
+    "llvm.hexagon.V6.vandvqv" => "__builtin_HEXAGON_V6_vandvqv",
+    "llvm.hexagon.V6.vandvqv.128B" => "__builtin_HEXAGON_V6_vandvqv_128B",
+    "llvm.hexagon.V6.vandvrt" => "__builtin_HEXAGON_V6_vandvrt",
+    "llvm.hexagon.V6.vandvrt.128B" => "__builtin_HEXAGON_V6_vandvrt_128B",
+    "llvm.hexagon.V6.vandvrt.acc" => "__builtin_HEXAGON_V6_vandvrt_acc",
+    "llvm.hexagon.V6.vandvrt.acc.128B" => "__builtin_HEXAGON_V6_vandvrt_acc_128B",
+    "llvm.hexagon.V6.vaslh" => "__builtin_HEXAGON_V6_vaslh",
+    "llvm.hexagon.V6.vaslh.128B" => "__builtin_HEXAGON_V6_vaslh_128B",
+    "llvm.hexagon.V6.vaslh.acc" => "__builtin_HEXAGON_V6_vaslh_acc",
+    "llvm.hexagon.V6.vaslh.acc.128B" => "__builtin_HEXAGON_V6_vaslh_acc_128B",
+    "llvm.hexagon.V6.vaslhv" => "__builtin_HEXAGON_V6_vaslhv",
+    "llvm.hexagon.V6.vaslhv.128B" => "__builtin_HEXAGON_V6_vaslhv_128B",
+    "llvm.hexagon.V6.vaslw" => "__builtin_HEXAGON_V6_vaslw",
+    "llvm.hexagon.V6.vaslw.128B" => "__builtin_HEXAGON_V6_vaslw_128B",
+    "llvm.hexagon.V6.vaslw.acc" => "__builtin_HEXAGON_V6_vaslw_acc",
+    "llvm.hexagon.V6.vaslw.acc.128B" => "__builtin_HEXAGON_V6_vaslw_acc_128B",
+    "llvm.hexagon.V6.vaslwv" => "__builtin_HEXAGON_V6_vaslwv",
+    "llvm.hexagon.V6.vaslwv.128B" => "__builtin_HEXAGON_V6_vaslwv_128B",
+    "llvm.hexagon.V6.vasr.into" => "__builtin_HEXAGON_V6_vasr_into",
+    "llvm.hexagon.V6.vasr.into.128B" => "__builtin_HEXAGON_V6_vasr_into_128B",
+    "llvm.hexagon.V6.vasrh" => "__builtin_HEXAGON_V6_vasrh",
+    "llvm.hexagon.V6.vasrh.128B" => "__builtin_HEXAGON_V6_vasrh_128B",
+    "llvm.hexagon.V6.vasrh.acc" => "__builtin_HEXAGON_V6_vasrh_acc",
+    "llvm.hexagon.V6.vasrh.acc.128B" => "__builtin_HEXAGON_V6_vasrh_acc_128B",
+    "llvm.hexagon.V6.vasrhbrndsat" => "__builtin_HEXAGON_V6_vasrhbrndsat",
+    "llvm.hexagon.V6.vasrhbrndsat.128B" => "__builtin_HEXAGON_V6_vasrhbrndsat_128B",
+    "llvm.hexagon.V6.vasrhbsat" => "__builtin_HEXAGON_V6_vasrhbsat",
+    "llvm.hexagon.V6.vasrhbsat.128B" => "__builtin_HEXAGON_V6_vasrhbsat_128B",
+    "llvm.hexagon.V6.vasrhubrndsat" => "__builtin_HEXAGON_V6_vasrhubrndsat",
+    "llvm.hexagon.V6.vasrhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrhubrndsat_128B",
+    "llvm.hexagon.V6.vasrhubsat" => "__builtin_HEXAGON_V6_vasrhubsat",
+    "llvm.hexagon.V6.vasrhubsat.128B" => "__builtin_HEXAGON_V6_vasrhubsat_128B",
+    "llvm.hexagon.V6.vasrhv" => "__builtin_HEXAGON_V6_vasrhv",
+    "llvm.hexagon.V6.vasrhv.128B" => "__builtin_HEXAGON_V6_vasrhv_128B",
+    "llvm.hexagon.V6.vasruhubrndsat" => "__builtin_HEXAGON_V6_vasruhubrndsat",
+    "llvm.hexagon.V6.vasruhubrndsat.128B" => "__builtin_HEXAGON_V6_vasruhubrndsat_128B",
+    "llvm.hexagon.V6.vasruhubsat" => "__builtin_HEXAGON_V6_vasruhubsat",
+    "llvm.hexagon.V6.vasruhubsat.128B" => "__builtin_HEXAGON_V6_vasruhubsat_128B",
+    "llvm.hexagon.V6.vasruwuhrndsat" => "__builtin_HEXAGON_V6_vasruwuhrndsat",
+    "llvm.hexagon.V6.vasruwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasruwuhrndsat_128B",
+    "llvm.hexagon.V6.vasruwuhsat" => "__builtin_HEXAGON_V6_vasruwuhsat",
+    "llvm.hexagon.V6.vasruwuhsat.128B" => "__builtin_HEXAGON_V6_vasruwuhsat_128B",
+    "llvm.hexagon.V6.vasrvuhubrndsat" => "__builtin_HEXAGON_V6_vasrvuhubrndsat",
+    "llvm.hexagon.V6.vasrvuhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubrndsat_128B",
+    "llvm.hexagon.V6.vasrvuhubsat" => "__builtin_HEXAGON_V6_vasrvuhubsat",
+    "llvm.hexagon.V6.vasrvuhubsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubsat_128B",
+    "llvm.hexagon.V6.vasrvwuhrndsat" => "__builtin_HEXAGON_V6_vasrvwuhrndsat",
+    "llvm.hexagon.V6.vasrvwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhrndsat_128B",
+    "llvm.hexagon.V6.vasrvwuhsat" => "__builtin_HEXAGON_V6_vasrvwuhsat",
+    "llvm.hexagon.V6.vasrvwuhsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhsat_128B",
+    "llvm.hexagon.V6.vasrw" => "__builtin_HEXAGON_V6_vasrw",
+    "llvm.hexagon.V6.vasrw.128B" => "__builtin_HEXAGON_V6_vasrw_128B",
+    "llvm.hexagon.V6.vasrw.acc" => "__builtin_HEXAGON_V6_vasrw_acc",
+    "llvm.hexagon.V6.vasrw.acc.128B" => "__builtin_HEXAGON_V6_vasrw_acc_128B",
+    "llvm.hexagon.V6.vasrwh" => "__builtin_HEXAGON_V6_vasrwh",
+    "llvm.hexagon.V6.vasrwh.128B" => "__builtin_HEXAGON_V6_vasrwh_128B",
+    "llvm.hexagon.V6.vasrwhrndsat" => "__builtin_HEXAGON_V6_vasrwhrndsat",
+    "llvm.hexagon.V6.vasrwhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwhrndsat_128B",
+    "llvm.hexagon.V6.vasrwhsat" => "__builtin_HEXAGON_V6_vasrwhsat",
+    "llvm.hexagon.V6.vasrwhsat.128B" => "__builtin_HEXAGON_V6_vasrwhsat_128B",
+    "llvm.hexagon.V6.vasrwuhrndsat" => "__builtin_HEXAGON_V6_vasrwuhrndsat",
+    "llvm.hexagon.V6.vasrwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwuhrndsat_128B",
+    "llvm.hexagon.V6.vasrwuhsat" => "__builtin_HEXAGON_V6_vasrwuhsat",
+    "llvm.hexagon.V6.vasrwuhsat.128B" => "__builtin_HEXAGON_V6_vasrwuhsat_128B",
+    "llvm.hexagon.V6.vasrwv" => "__builtin_HEXAGON_V6_vasrwv",
+    "llvm.hexagon.V6.vasrwv.128B" => "__builtin_HEXAGON_V6_vasrwv_128B",
+    "llvm.hexagon.V6.vassign" => "__builtin_HEXAGON_V6_vassign",
+    "llvm.hexagon.V6.vassign.128B" => "__builtin_HEXAGON_V6_vassign_128B",
+    "llvm.hexagon.V6.vassign.fp" => "__builtin_HEXAGON_V6_vassign_fp",
+    "llvm.hexagon.V6.vassign.fp.128B" => "__builtin_HEXAGON_V6_vassign_fp_128B",
+    "llvm.hexagon.V6.vassignp" => "__builtin_HEXAGON_V6_vassignp",
+    "llvm.hexagon.V6.vassignp.128B" => "__builtin_HEXAGON_V6_vassignp_128B",
+    "llvm.hexagon.V6.vavgb" => "__builtin_HEXAGON_V6_vavgb",
+    "llvm.hexagon.V6.vavgb.128B" => "__builtin_HEXAGON_V6_vavgb_128B",
+    "llvm.hexagon.V6.vavgbrnd" => "__builtin_HEXAGON_V6_vavgbrnd",
+    "llvm.hexagon.V6.vavgbrnd.128B" => "__builtin_HEXAGON_V6_vavgbrnd_128B",
+    "llvm.hexagon.V6.vavgh" => "__builtin_HEXAGON_V6_vavgh",
+    "llvm.hexagon.V6.vavgh.128B" => "__builtin_HEXAGON_V6_vavgh_128B",
+    "llvm.hexagon.V6.vavghrnd" => "__builtin_HEXAGON_V6_vavghrnd",
+    "llvm.hexagon.V6.vavghrnd.128B" => "__builtin_HEXAGON_V6_vavghrnd_128B",
+    "llvm.hexagon.V6.vavgub" => "__builtin_HEXAGON_V6_vavgub",
+    "llvm.hexagon.V6.vavgub.128B" => "__builtin_HEXAGON_V6_vavgub_128B",
+    "llvm.hexagon.V6.vavgubrnd" => "__builtin_HEXAGON_V6_vavgubrnd",
+    "llvm.hexagon.V6.vavgubrnd.128B" => "__builtin_HEXAGON_V6_vavgubrnd_128B",
+    "llvm.hexagon.V6.vavguh" => "__builtin_HEXAGON_V6_vavguh",
+    "llvm.hexagon.V6.vavguh.128B" => "__builtin_HEXAGON_V6_vavguh_128B",
+    "llvm.hexagon.V6.vavguhrnd" => "__builtin_HEXAGON_V6_vavguhrnd",
+    "llvm.hexagon.V6.vavguhrnd.128B" => "__builtin_HEXAGON_V6_vavguhrnd_128B",
+    "llvm.hexagon.V6.vavguw" => "__builtin_HEXAGON_V6_vavguw",
+    "llvm.hexagon.V6.vavguw.128B" => "__builtin_HEXAGON_V6_vavguw_128B",
+    "llvm.hexagon.V6.vavguwrnd" => "__builtin_HEXAGON_V6_vavguwrnd",
+    "llvm.hexagon.V6.vavguwrnd.128B" => "__builtin_HEXAGON_V6_vavguwrnd_128B",
+    "llvm.hexagon.V6.vavgw" => "__builtin_HEXAGON_V6_vavgw",
+    "llvm.hexagon.V6.vavgw.128B" => "__builtin_HEXAGON_V6_vavgw_128B",
+    "llvm.hexagon.V6.vavgwrnd" => "__builtin_HEXAGON_V6_vavgwrnd",
+    "llvm.hexagon.V6.vavgwrnd.128B" => "__builtin_HEXAGON_V6_vavgwrnd_128B",
+    "llvm.hexagon.V6.vcl0h" => "__builtin_HEXAGON_V6_vcl0h",
+    "llvm.hexagon.V6.vcl0h.128B" => "__builtin_HEXAGON_V6_vcl0h_128B",
+    "llvm.hexagon.V6.vcl0w" => "__builtin_HEXAGON_V6_vcl0w",
+    "llvm.hexagon.V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B",
+    "llvm.hexagon.V6.vcombine" => "__builtin_HEXAGON_V6_vcombine",
+    "llvm.hexagon.V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B",
+    "llvm.hexagon.V6.vconv.h.hf" => "__builtin_HEXAGON_V6_vconv_h_hf",
+    "llvm.hexagon.V6.vconv.h.hf.128B" => "__builtin_HEXAGON_V6_vconv_h_hf_128B",
+    "llvm.hexagon.V6.vconv.hf.h" => "__builtin_HEXAGON_V6_vconv_hf_h",
+    "llvm.hexagon.V6.vconv.hf.h.128B" => "__builtin_HEXAGON_V6_vconv_hf_h_128B",
+    "llvm.hexagon.V6.vconv.hf.qf16" => "__builtin_HEXAGON_V6_vconv_hf_qf16",
+    "llvm.hexagon.V6.vconv.hf.qf16.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf16_128B",
+    "llvm.hexagon.V6.vconv.hf.qf32" => "__builtin_HEXAGON_V6_vconv_hf_qf32",
+    "llvm.hexagon.V6.vconv.hf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf32_128B",
+    "llvm.hexagon.V6.vconv.sf.qf32" => "__builtin_HEXAGON_V6_vconv_sf_qf32",
+    "llvm.hexagon.V6.vconv.sf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_sf_qf32_128B",
+    "llvm.hexagon.V6.vconv.sf.w" => "__builtin_HEXAGON_V6_vconv_sf_w",
+    "llvm.hexagon.V6.vconv.sf.w.128B" => "__builtin_HEXAGON_V6_vconv_sf_w_128B",
+    "llvm.hexagon.V6.vconv.w.sf" => "__builtin_HEXAGON_V6_vconv_w_sf",
+    "llvm.hexagon.V6.vconv.w.sf.128B" => "__builtin_HEXAGON_V6_vconv_w_sf_128B",
+    "llvm.hexagon.V6.vcvt.b.hf" => "__builtin_HEXAGON_V6_vcvt_b_hf",
+    "llvm.hexagon.V6.vcvt.b.hf.128B" => "__builtin_HEXAGON_V6_vcvt_b_hf_128B",
+    "llvm.hexagon.V6.vcvt.bf.sf" => "__builtin_HEXAGON_V6_vcvt_bf_sf",
+    "llvm.hexagon.V6.vcvt.bf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_bf_sf_128B",
+    "llvm.hexagon.V6.vcvt.h.hf" => "__builtin_HEXAGON_V6_vcvt_h_hf",
+    "llvm.hexagon.V6.vcvt.h.hf.128B" => "__builtin_HEXAGON_V6_vcvt_h_hf_128B",
+    "llvm.hexagon.V6.vcvt.hf.b" => "__builtin_HEXAGON_V6_vcvt_hf_b",
+    "llvm.hexagon.V6.vcvt.hf.b.128B" => "__builtin_HEXAGON_V6_vcvt_hf_b_128B",
+    "llvm.hexagon.V6.vcvt.hf.h" => "__builtin_HEXAGON_V6_vcvt_hf_h",
+    "llvm.hexagon.V6.vcvt.hf.h.128B" => "__builtin_HEXAGON_V6_vcvt_hf_h_128B",
+    "llvm.hexagon.V6.vcvt.hf.sf" => "__builtin_HEXAGON_V6_vcvt_hf_sf",
+    "llvm.hexagon.V6.vcvt.hf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_hf_sf_128B",
+    "llvm.hexagon.V6.vcvt.hf.ub" => "__builtin_HEXAGON_V6_vcvt_hf_ub",
+    "llvm.hexagon.V6.vcvt.hf.ub.128B" => "__builtin_HEXAGON_V6_vcvt_hf_ub_128B",
+    "llvm.hexagon.V6.vcvt.hf.uh" => "__builtin_HEXAGON_V6_vcvt_hf_uh",
+    "llvm.hexagon.V6.vcvt.hf.uh.128B" => "__builtin_HEXAGON_V6_vcvt_hf_uh_128B",
+    "llvm.hexagon.V6.vcvt.sf.hf" => "__builtin_HEXAGON_V6_vcvt_sf_hf",
+    "llvm.hexagon.V6.vcvt.sf.hf.128B" => "__builtin_HEXAGON_V6_vcvt_sf_hf_128B",
+    "llvm.hexagon.V6.vcvt.ub.hf" => "__builtin_HEXAGON_V6_vcvt_ub_hf",
+    "llvm.hexagon.V6.vcvt.ub.hf.128B" => "__builtin_HEXAGON_V6_vcvt_ub_hf_128B",
+    "llvm.hexagon.V6.vcvt.uh.hf" => "__builtin_HEXAGON_V6_vcvt_uh_hf",
+    "llvm.hexagon.V6.vcvt.uh.hf.128B" => "__builtin_HEXAGON_V6_vcvt_uh_hf_128B",
+    "llvm.hexagon.V6.vd0" => "__builtin_HEXAGON_V6_vd0",
+    "llvm.hexagon.V6.vd0.128B" => "__builtin_HEXAGON_V6_vd0_128B",
+    "llvm.hexagon.V6.vdd0" => "__builtin_HEXAGON_V6_vdd0",
+    "llvm.hexagon.V6.vdd0.128B" => "__builtin_HEXAGON_V6_vdd0_128B",
+    "llvm.hexagon.V6.vdealb" => "__builtin_HEXAGON_V6_vdealb",
+    "llvm.hexagon.V6.vdealb.128B" => "__builtin_HEXAGON_V6_vdealb_128B",
+    "llvm.hexagon.V6.vdealb4w" => "__builtin_HEXAGON_V6_vdealb4w",
+    "llvm.hexagon.V6.vdealb4w.128B" => "__builtin_HEXAGON_V6_vdealb4w_128B",
+    "llvm.hexagon.V6.vdealh" => "__builtin_HEXAGON_V6_vdealh",
+    "llvm.hexagon.V6.vdealh.128B" => "__builtin_HEXAGON_V6_vdealh_128B",
+    "llvm.hexagon.V6.vdealvdd" => "__builtin_HEXAGON_V6_vdealvdd",
+    "llvm.hexagon.V6.vdealvdd.128B" => "__builtin_HEXAGON_V6_vdealvdd_128B",
+    "llvm.hexagon.V6.vdelta" => "__builtin_HEXAGON_V6_vdelta",
+    "llvm.hexagon.V6.vdelta.128B" => "__builtin_HEXAGON_V6_vdelta_128B",
+    "llvm.hexagon.V6.vdmpy.sf.hf" => "__builtin_HEXAGON_V6_vdmpy_sf_hf",
+    "llvm.hexagon.V6.vdmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_128B",
+    "llvm.hexagon.V6.vdmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc",
+    "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc_128B",
+    "llvm.hexagon.V6.vdmpybus" => "__builtin_HEXAGON_V6_vdmpybus",
+    "llvm.hexagon.V6.vdmpybus.128B" => "__builtin_HEXAGON_V6_vdmpybus_128B",
+    "llvm.hexagon.V6.vdmpybus.acc" => "__builtin_HEXAGON_V6_vdmpybus_acc",
+    "llvm.hexagon.V6.vdmpybus.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_acc_128B",
+    "llvm.hexagon.V6.vdmpybus.dv" => "__builtin_HEXAGON_V6_vdmpybus_dv",
+    "llvm.hexagon.V6.vdmpybus.dv.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_128B",
+    "llvm.hexagon.V6.vdmpybus.dv.acc" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc",
+    "llvm.hexagon.V6.vdmpybus.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B",
+    "llvm.hexagon.V6.vdmpyhb" => "__builtin_HEXAGON_V6_vdmpyhb",
+    "llvm.hexagon.V6.vdmpyhb.128B" => "__builtin_HEXAGON_V6_vdmpyhb_128B",
+    "llvm.hexagon.V6.vdmpyhb.acc" => "__builtin_HEXAGON_V6_vdmpyhb_acc",
+    "llvm.hexagon.V6.vdmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_acc_128B",
+    "llvm.hexagon.V6.vdmpyhb.dv" => "__builtin_HEXAGON_V6_vdmpyhb_dv",
+    "llvm.hexagon.V6.vdmpyhb.dv.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_128B",
+    "llvm.hexagon.V6.vdmpyhb.dv.acc" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc",
+    "llvm.hexagon.V6.vdmpyhb.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B",
+    "llvm.hexagon.V6.vdmpyhisat" => "__builtin_HEXAGON_V6_vdmpyhisat",
+    "llvm.hexagon.V6.vdmpyhisat.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_128B",
+    "llvm.hexagon.V6.vdmpyhisat.acc" => "__builtin_HEXAGON_V6_vdmpyhisat_acc",
+    "llvm.hexagon.V6.vdmpyhisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_acc_128B",
+    "llvm.hexagon.V6.vdmpyhsat" => "__builtin_HEXAGON_V6_vdmpyhsat",
+    "llvm.hexagon.V6.vdmpyhsat.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_128B",
+    "llvm.hexagon.V6.vdmpyhsat.acc" => "__builtin_HEXAGON_V6_vdmpyhsat_acc",
+    "llvm.hexagon.V6.vdmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_acc_128B",
+    "llvm.hexagon.V6.vdmpyhsuisat" => "__builtin_HEXAGON_V6_vdmpyhsuisat",
+    "llvm.hexagon.V6.vdmpyhsuisat.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_128B",
+    "llvm.hexagon.V6.vdmpyhsuisat.acc" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc",
+    "llvm.hexagon.V6.vdmpyhsuisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B",
+    "llvm.hexagon.V6.vdmpyhsusat" => "__builtin_HEXAGON_V6_vdmpyhsusat",
+    "llvm.hexagon.V6.vdmpyhsusat.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_128B",
+    "llvm.hexagon.V6.vdmpyhsusat.acc" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc",
+    "llvm.hexagon.V6.vdmpyhsusat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B",
+    "llvm.hexagon.V6.vdmpyhvsat" => "__builtin_HEXAGON_V6_vdmpyhvsat",
+    "llvm.hexagon.V6.vdmpyhvsat.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_128B",
+    "llvm.hexagon.V6.vdmpyhvsat.acc" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc",
+    "llvm.hexagon.V6.vdmpyhvsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B",
+    "llvm.hexagon.V6.vdsaduh" => "__builtin_HEXAGON_V6_vdsaduh",
+    "llvm.hexagon.V6.vdsaduh.128B" => "__builtin_HEXAGON_V6_vdsaduh_128B",
+    "llvm.hexagon.V6.vdsaduh.acc" => "__builtin_HEXAGON_V6_vdsaduh_acc",
+    "llvm.hexagon.V6.vdsaduh.acc.128B" => "__builtin_HEXAGON_V6_vdsaduh_acc_128B",
+    "llvm.hexagon.V6.veqb" => "__builtin_HEXAGON_V6_veqb",
+    "llvm.hexagon.V6.veqb.128B" => "__builtin_HEXAGON_V6_veqb_128B",
+    "llvm.hexagon.V6.veqb.and" => "__builtin_HEXAGON_V6_veqb_and",
+    "llvm.hexagon.V6.veqb.and.128B" => "__builtin_HEXAGON_V6_veqb_and_128B",
+    "llvm.hexagon.V6.veqb.or" => "__builtin_HEXAGON_V6_veqb_or",
+    "llvm.hexagon.V6.veqb.or.128B" => "__builtin_HEXAGON_V6_veqb_or_128B",
+    "llvm.hexagon.V6.veqb.xor" => "__builtin_HEXAGON_V6_veqb_xor",
+    "llvm.hexagon.V6.veqb.xor.128B" => "__builtin_HEXAGON_V6_veqb_xor_128B",
+    "llvm.hexagon.V6.veqh" => "__builtin_HEXAGON_V6_veqh",
+    "llvm.hexagon.V6.veqh.128B" => "__builtin_HEXAGON_V6_veqh_128B",
+    "llvm.hexagon.V6.veqh.and" => "__builtin_HEXAGON_V6_veqh_and",
+    "llvm.hexagon.V6.veqh.and.128B" => "__builtin_HEXAGON_V6_veqh_and_128B",
+    "llvm.hexagon.V6.veqh.or" => "__builtin_HEXAGON_V6_veqh_or",
+    "llvm.hexagon.V6.veqh.or.128B" => "__builtin_HEXAGON_V6_veqh_or_128B",
+    "llvm.hexagon.V6.veqh.xor" => "__builtin_HEXAGON_V6_veqh_xor",
+    "llvm.hexagon.V6.veqh.xor.128B" => "__builtin_HEXAGON_V6_veqh_xor_128B",
+    "llvm.hexagon.V6.veqw" => "__builtin_HEXAGON_V6_veqw",
+    "llvm.hexagon.V6.veqw.128B" => "__builtin_HEXAGON_V6_veqw_128B",
+    "llvm.hexagon.V6.veqw.and" => "__builtin_HEXAGON_V6_veqw_and",
+    "llvm.hexagon.V6.veqw.and.128B" => "__builtin_HEXAGON_V6_veqw_and_128B",
+    "llvm.hexagon.V6.veqw.or" => "__builtin_HEXAGON_V6_veqw_or",
+    "llvm.hexagon.V6.veqw.or.128B" => "__builtin_HEXAGON_V6_veqw_or_128B",
+    "llvm.hexagon.V6.veqw.xor" => "__builtin_HEXAGON_V6_veqw_xor",
+    "llvm.hexagon.V6.veqw.xor.128B" => "__builtin_HEXAGON_V6_veqw_xor_128B",
+    "llvm.hexagon.V6.vfmax.hf" => "__builtin_HEXAGON_V6_vfmax_hf",
+    "llvm.hexagon.V6.vfmax.hf.128B" => "__builtin_HEXAGON_V6_vfmax_hf_128B",
+    "llvm.hexagon.V6.vfmax.sf" => "__builtin_HEXAGON_V6_vfmax_sf",
+    "llvm.hexagon.V6.vfmax.sf.128B" => "__builtin_HEXAGON_V6_vfmax_sf_128B",
+    "llvm.hexagon.V6.vfmin.hf" => "__builtin_HEXAGON_V6_vfmin_hf",
+    "llvm.hexagon.V6.vfmin.hf.128B" => "__builtin_HEXAGON_V6_vfmin_hf_128B",
+    "llvm.hexagon.V6.vfmin.sf" => "__builtin_HEXAGON_V6_vfmin_sf",
+    "llvm.hexagon.V6.vfmin.sf.128B" => "__builtin_HEXAGON_V6_vfmin_sf_128B",
+    "llvm.hexagon.V6.vfneg.hf" => "__builtin_HEXAGON_V6_vfneg_hf",
+    "llvm.hexagon.V6.vfneg.hf.128B" => "__builtin_HEXAGON_V6_vfneg_hf_128B",
+    "llvm.hexagon.V6.vfneg.sf" => "__builtin_HEXAGON_V6_vfneg_sf",
+    "llvm.hexagon.V6.vfneg.sf.128B" => "__builtin_HEXAGON_V6_vfneg_sf_128B",
+    "llvm.hexagon.V6.vgathermh" => "__builtin_HEXAGON_V6_vgathermh",
+    "llvm.hexagon.V6.vgathermh.128B" => "__builtin_HEXAGON_V6_vgathermh_128B",
+    "llvm.hexagon.V6.vgathermhq" => "__builtin_HEXAGON_V6_vgathermhq",
+    "llvm.hexagon.V6.vgathermhq.128B" => "__builtin_HEXAGON_V6_vgathermhq_128B",
+    "llvm.hexagon.V6.vgathermhw" => "__builtin_HEXAGON_V6_vgathermhw",
+    "llvm.hexagon.V6.vgathermhw.128B" => "__builtin_HEXAGON_V6_vgathermhw_128B",
+    "llvm.hexagon.V6.vgathermhwq" => "__builtin_HEXAGON_V6_vgathermhwq",
+    "llvm.hexagon.V6.vgathermhwq.128B" => "__builtin_HEXAGON_V6_vgathermhwq_128B",
+    "llvm.hexagon.V6.vgathermw" => "__builtin_HEXAGON_V6_vgathermw",
+    "llvm.hexagon.V6.vgathermw.128B" => "__builtin_HEXAGON_V6_vgathermw_128B",
+    "llvm.hexagon.V6.vgathermwq" => "__builtin_HEXAGON_V6_vgathermwq",
+    "llvm.hexagon.V6.vgathermwq.128B" => "__builtin_HEXAGON_V6_vgathermwq_128B",
+    "llvm.hexagon.V6.vgtb" => "__builtin_HEXAGON_V6_vgtb",
+    "llvm.hexagon.V6.vgtb.128B" => "__builtin_HEXAGON_V6_vgtb_128B",
+    "llvm.hexagon.V6.vgtb.and" => "__builtin_HEXAGON_V6_vgtb_and",
+    "llvm.hexagon.V6.vgtb.and.128B" => "__builtin_HEXAGON_V6_vgtb_and_128B",
+    "llvm.hexagon.V6.vgtb.or" => "__builtin_HEXAGON_V6_vgtb_or",
+    "llvm.hexagon.V6.vgtb.or.128B" => "__builtin_HEXAGON_V6_vgtb_or_128B",
+    "llvm.hexagon.V6.vgtb.xor" => "__builtin_HEXAGON_V6_vgtb_xor",
+    "llvm.hexagon.V6.vgtb.xor.128B" => "__builtin_HEXAGON_V6_vgtb_xor_128B",
+    "llvm.hexagon.V6.vgtbf" => "__builtin_HEXAGON_V6_vgtbf",
+    "llvm.hexagon.V6.vgtbf.128B" => "__builtin_HEXAGON_V6_vgtbf_128B",
+    "llvm.hexagon.V6.vgtbf.and" => "__builtin_HEXAGON_V6_vgtbf_and",
+    "llvm.hexagon.V6.vgtbf.and.128B" => "__builtin_HEXAGON_V6_vgtbf_and_128B",
+    "llvm.hexagon.V6.vgtbf.or" => "__builtin_HEXAGON_V6_vgtbf_or",
+    "llvm.hexagon.V6.vgtbf.or.128B" => "__builtin_HEXAGON_V6_vgtbf_or_128B",
+    "llvm.hexagon.V6.vgtbf.xor" => "__builtin_HEXAGON_V6_vgtbf_xor",
+    "llvm.hexagon.V6.vgtbf.xor.128B" => "__builtin_HEXAGON_V6_vgtbf_xor_128B",
+    "llvm.hexagon.V6.vgth" => "__builtin_HEXAGON_V6_vgth",
+    "llvm.hexagon.V6.vgth.128B" => "__builtin_HEXAGON_V6_vgth_128B",
+    "llvm.hexagon.V6.vgth.and" => "__builtin_HEXAGON_V6_vgth_and",
+    "llvm.hexagon.V6.vgth.and.128B" => "__builtin_HEXAGON_V6_vgth_and_128B",
+    "llvm.hexagon.V6.vgth.or" => "__builtin_HEXAGON_V6_vgth_or",
+    "llvm.hexagon.V6.vgth.or.128B" => "__builtin_HEXAGON_V6_vgth_or_128B",
+    "llvm.hexagon.V6.vgth.xor" => "__builtin_HEXAGON_V6_vgth_xor",
+    "llvm.hexagon.V6.vgth.xor.128B" => "__builtin_HEXAGON_V6_vgth_xor_128B",
+    "llvm.hexagon.V6.vgthf" => "__builtin_HEXAGON_V6_vgthf",
+    "llvm.hexagon.V6.vgthf.128B" => "__builtin_HEXAGON_V6_vgthf_128B",
+    "llvm.hexagon.V6.vgthf.and" => "__builtin_HEXAGON_V6_vgthf_and",
+    "llvm.hexagon.V6.vgthf.and.128B" => "__builtin_HEXAGON_V6_vgthf_and_128B",
+    "llvm.hexagon.V6.vgthf.or" => "__builtin_HEXAGON_V6_vgthf_or",
+    "llvm.hexagon.V6.vgthf.or.128B" => "__builtin_HEXAGON_V6_vgthf_or_128B",
+    "llvm.hexagon.V6.vgthf.xor" => "__builtin_HEXAGON_V6_vgthf_xor",
+    "llvm.hexagon.V6.vgthf.xor.128B" => "__builtin_HEXAGON_V6_vgthf_xor_128B",
+    "llvm.hexagon.V6.vgtsf" => "__builtin_HEXAGON_V6_vgtsf",
+    "llvm.hexagon.V6.vgtsf.128B" => "__builtin_HEXAGON_V6_vgtsf_128B",
+    "llvm.hexagon.V6.vgtsf.and" => "__builtin_HEXAGON_V6_vgtsf_and",
+    "llvm.hexagon.V6.vgtsf.and.128B" => "__builtin_HEXAGON_V6_vgtsf_and_128B",
+    "llvm.hexagon.V6.vgtsf.or" => "__builtin_HEXAGON_V6_vgtsf_or",
+    "llvm.hexagon.V6.vgtsf.or.128B" => "__builtin_HEXAGON_V6_vgtsf_or_128B",
+    "llvm.hexagon.V6.vgtsf.xor" => "__builtin_HEXAGON_V6_vgtsf_xor",
+    "llvm.hexagon.V6.vgtsf.xor.128B" => "__builtin_HEXAGON_V6_vgtsf_xor_128B",
+    "llvm.hexagon.V6.vgtub" => "__builtin_HEXAGON_V6_vgtub",
+    "llvm.hexagon.V6.vgtub.128B" => "__builtin_HEXAGON_V6_vgtub_128B",
+    "llvm.hexagon.V6.vgtub.and" => "__builtin_HEXAGON_V6_vgtub_and",
+    "llvm.hexagon.V6.vgtub.and.128B" => "__builtin_HEXAGON_V6_vgtub_and_128B",
+    "llvm.hexagon.V6.vgtub.or" => "__builtin_HEXAGON_V6_vgtub_or",
+    "llvm.hexagon.V6.vgtub.or.128B" => "__builtin_HEXAGON_V6_vgtub_or_128B",
+    "llvm.hexagon.V6.vgtub.xor" => "__builtin_HEXAGON_V6_vgtub_xor",
+    "llvm.hexagon.V6.vgtub.xor.128B" => "__builtin_HEXAGON_V6_vgtub_xor_128B",
+    "llvm.hexagon.V6.vgtuh" => "__builtin_HEXAGON_V6_vgtuh",
+    "llvm.hexagon.V6.vgtuh.128B" => "__builtin_HEXAGON_V6_vgtuh_128B",
+    "llvm.hexagon.V6.vgtuh.and" => "__builtin_HEXAGON_V6_vgtuh_and",
+    "llvm.hexagon.V6.vgtuh.and.128B" => "__builtin_HEXAGON_V6_vgtuh_and_128B",
+    "llvm.hexagon.V6.vgtuh.or" => "__builtin_HEXAGON_V6_vgtuh_or",
+    "llvm.hexagon.V6.vgtuh.or.128B" => "__builtin_HEXAGON_V6_vgtuh_or_128B",
+    "llvm.hexagon.V6.vgtuh.xor" => "__builtin_HEXAGON_V6_vgtuh_xor",
+    "llvm.hexagon.V6.vgtuh.xor.128B" => "__builtin_HEXAGON_V6_vgtuh_xor_128B",
+    "llvm.hexagon.V6.vgtuw" => "__builtin_HEXAGON_V6_vgtuw",
+    "llvm.hexagon.V6.vgtuw.128B" => "__builtin_HEXAGON_V6_vgtuw_128B",
+    "llvm.hexagon.V6.vgtuw.and" => "__builtin_HEXAGON_V6_vgtuw_and",
+    "llvm.hexagon.V6.vgtuw.and.128B" => "__builtin_HEXAGON_V6_vgtuw_and_128B",
+    "llvm.hexagon.V6.vgtuw.or" => "__builtin_HEXAGON_V6_vgtuw_or",
+    "llvm.hexagon.V6.vgtuw.or.128B" => "__builtin_HEXAGON_V6_vgtuw_or_128B",
+    "llvm.hexagon.V6.vgtuw.xor" => "__builtin_HEXAGON_V6_vgtuw_xor",
+    "llvm.hexagon.V6.vgtuw.xor.128B" => "__builtin_HEXAGON_V6_vgtuw_xor_128B",
+    "llvm.hexagon.V6.vgtw" => "__builtin_HEXAGON_V6_vgtw",
+    "llvm.hexagon.V6.vgtw.128B" => "__builtin_HEXAGON_V6_vgtw_128B",
+    "llvm.hexagon.V6.vgtw.and" => "__builtin_HEXAGON_V6_vgtw_and",
+    "llvm.hexagon.V6.vgtw.and.128B" => "__builtin_HEXAGON_V6_vgtw_and_128B",
+    "llvm.hexagon.V6.vgtw.or" => "__builtin_HEXAGON_V6_vgtw_or",
+    "llvm.hexagon.V6.vgtw.or.128B" => "__builtin_HEXAGON_V6_vgtw_or_128B",
+    "llvm.hexagon.V6.vgtw.xor" => "__builtin_HEXAGON_V6_vgtw_xor",
+    "llvm.hexagon.V6.vgtw.xor.128B" => "__builtin_HEXAGON_V6_vgtw_xor_128B",
+    "llvm.hexagon.V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr",
+    "llvm.hexagon.V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B",
+    "llvm.hexagon.V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb",
+    "llvm.hexagon.V6.vlalignb.128B" => "__builtin_HEXAGON_V6_vlalignb_128B",
+    "llvm.hexagon.V6.vlalignbi" => "__builtin_HEXAGON_V6_vlalignbi",
+    "llvm.hexagon.V6.vlalignbi.128B" => "__builtin_HEXAGON_V6_vlalignbi_128B",
+    "llvm.hexagon.V6.vlsrb" => "__builtin_HEXAGON_V6_vlsrb",
+    "llvm.hexagon.V6.vlsrb.128B" => "__builtin_HEXAGON_V6_vlsrb_128B",
+    "llvm.hexagon.V6.vlsrh" => "__builtin_HEXAGON_V6_vlsrh",
+    "llvm.hexagon.V6.vlsrh.128B" => "__builtin_HEXAGON_V6_vlsrh_128B",
+    "llvm.hexagon.V6.vlsrhv" => "__builtin_HEXAGON_V6_vlsrhv",
+    "llvm.hexagon.V6.vlsrhv.128B" => "__builtin_HEXAGON_V6_vlsrhv_128B",
+    "llvm.hexagon.V6.vlsrw" => "__builtin_HEXAGON_V6_vlsrw",
+    "llvm.hexagon.V6.vlsrw.128B" => "__builtin_HEXAGON_V6_vlsrw_128B",
+    "llvm.hexagon.V6.vlsrwv" => "__builtin_HEXAGON_V6_vlsrwv",
+    "llvm.hexagon.V6.vlsrwv.128B" => "__builtin_HEXAGON_V6_vlsrwv_128B",
+    "llvm.hexagon.V6.vlut4" => "__builtin_HEXAGON_V6_vlut4",
+    "llvm.hexagon.V6.vlut4.128B" => "__builtin_HEXAGON_V6_vlut4_128B",
+    "llvm.hexagon.V6.vlutb" => "__builtin_HEXAGON_V6_vlutb",
+    "llvm.hexagon.V6.vlutb.128B" => "__builtin_HEXAGON_V6_vlutb_128B",
+    "llvm.hexagon.V6.vlutb.acc" => "__builtin_HEXAGON_V6_vlutb_acc",
+    "llvm.hexagon.V6.vlutb.acc.128B" => "__builtin_HEXAGON_V6_vlutb_acc_128B",
+    "llvm.hexagon.V6.vlutb.dv" => "__builtin_HEXAGON_V6_vlutb_dv",
+    "llvm.hexagon.V6.vlutb.dv.128B" => "__builtin_HEXAGON_V6_vlutb_dv_128B",
+    "llvm.hexagon.V6.vlutb.dv.acc" => "__builtin_HEXAGON_V6_vlutb_dv_acc",
+    "llvm.hexagon.V6.vlutb.dv.acc.128B" => "__builtin_HEXAGON_V6_vlutb_dv_acc_128B",
+    "llvm.hexagon.V6.vlutvvb" => "__builtin_HEXAGON_V6_vlutvvb",
+    "llvm.hexagon.V6.vlutvvb.128B" => "__builtin_HEXAGON_V6_vlutvvb_128B",
+    "llvm.hexagon.V6.vlutvvb.nm" => "__builtin_HEXAGON_V6_vlutvvb_nm",
+    "llvm.hexagon.V6.vlutvvb.nm.128B" => "__builtin_HEXAGON_V6_vlutvvb_nm_128B",
+    "llvm.hexagon.V6.vlutvvb.oracc" => "__builtin_HEXAGON_V6_vlutvvb_oracc",
+    "llvm.hexagon.V6.vlutvvb.oracc.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracc_128B",
+    "llvm.hexagon.V6.vlutvvb.oracci" => "__builtin_HEXAGON_V6_vlutvvb_oracci",
+    "llvm.hexagon.V6.vlutvvb.oracci.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracci_128B",
+    "llvm.hexagon.V6.vlutvvbi" => "__builtin_HEXAGON_V6_vlutvvbi",
+    "llvm.hexagon.V6.vlutvvbi.128B" => "__builtin_HEXAGON_V6_vlutvvbi_128B",
+    "llvm.hexagon.V6.vlutvwh" => "__builtin_HEXAGON_V6_vlutvwh",
+    "llvm.hexagon.V6.vlutvwh.128B" => "__builtin_HEXAGON_V6_vlutvwh_128B",
+    "llvm.hexagon.V6.vlutvwh.nm" => "__builtin_HEXAGON_V6_vlutvwh_nm",
+    "llvm.hexagon.V6.vlutvwh.nm.128B" => "__builtin_HEXAGON_V6_vlutvwh_nm_128B",
+    "llvm.hexagon.V6.vlutvwh.oracc" => "__builtin_HEXAGON_V6_vlutvwh_oracc",
+    "llvm.hexagon.V6.vlutvwh.oracc.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracc_128B",
+    "llvm.hexagon.V6.vlutvwh.oracci" => "__builtin_HEXAGON_V6_vlutvwh_oracci",
+    "llvm.hexagon.V6.vlutvwh.oracci.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracci_128B",
+    "llvm.hexagon.V6.vlutvwhi" => "__builtin_HEXAGON_V6_vlutvwhi",
+    "llvm.hexagon.V6.vlutvwhi.128B" => "__builtin_HEXAGON_V6_vlutvwhi_128B",
+    "llvm.hexagon.V6.vmax.bf" => "__builtin_HEXAGON_V6_vmax_bf",
+    "llvm.hexagon.V6.vmax.bf.128B" => "__builtin_HEXAGON_V6_vmax_bf_128B",
+    "llvm.hexagon.V6.vmax.hf" => "__builtin_HEXAGON_V6_vmax_hf",
+    "llvm.hexagon.V6.vmax.hf.128B" => "__builtin_HEXAGON_V6_vmax_hf_128B",
+    "llvm.hexagon.V6.vmax.sf" => "__builtin_HEXAGON_V6_vmax_sf",
+    "llvm.hexagon.V6.vmax.sf.128B" => "__builtin_HEXAGON_V6_vmax_sf_128B",
+    "llvm.hexagon.V6.vmaxb" => "__builtin_HEXAGON_V6_vmaxb",
+    "llvm.hexagon.V6.vmaxb.128B" => "__builtin_HEXAGON_V6_vmaxb_128B",
+    "llvm.hexagon.V6.vmaxh" => "__builtin_HEXAGON_V6_vmaxh",
+    "llvm.hexagon.V6.vmaxh.128B" => "__builtin_HEXAGON_V6_vmaxh_128B",
+    "llvm.hexagon.V6.vmaxub" => "__builtin_HEXAGON_V6_vmaxub",
+    "llvm.hexagon.V6.vmaxub.128B" => "__builtin_HEXAGON_V6_vmaxub_128B",
+    "llvm.hexagon.V6.vmaxuh" => "__builtin_HEXAGON_V6_vmaxuh",
+    "llvm.hexagon.V6.vmaxuh.128B" => "__builtin_HEXAGON_V6_vmaxuh_128B",
+    "llvm.hexagon.V6.vmaxw" => "__builtin_HEXAGON_V6_vmaxw",
+    "llvm.hexagon.V6.vmaxw.128B" => "__builtin_HEXAGON_V6_vmaxw_128B",
+    "llvm.hexagon.V6.vmin.bf" => "__builtin_HEXAGON_V6_vmin_bf",
+    "llvm.hexagon.V6.vmin.bf.128B" => "__builtin_HEXAGON_V6_vmin_bf_128B",
+    "llvm.hexagon.V6.vmin.hf" => "__builtin_HEXAGON_V6_vmin_hf",
+    "llvm.hexagon.V6.vmin.hf.128B" => "__builtin_HEXAGON_V6_vmin_hf_128B",
+    "llvm.hexagon.V6.vmin.sf" => "__builtin_HEXAGON_V6_vmin_sf",
+    "llvm.hexagon.V6.vmin.sf.128B" => "__builtin_HEXAGON_V6_vmin_sf_128B",
+    "llvm.hexagon.V6.vminb" => "__builtin_HEXAGON_V6_vminb",
+    "llvm.hexagon.V6.vminb.128B" => "__builtin_HEXAGON_V6_vminb_128B",
+    "llvm.hexagon.V6.vminh" => "__builtin_HEXAGON_V6_vminh",
+    "llvm.hexagon.V6.vminh.128B" => "__builtin_HEXAGON_V6_vminh_128B",
+    "llvm.hexagon.V6.vminub" => "__builtin_HEXAGON_V6_vminub",
+    "llvm.hexagon.V6.vminub.128B" => "__builtin_HEXAGON_V6_vminub_128B",
+    "llvm.hexagon.V6.vminuh" => "__builtin_HEXAGON_V6_vminuh",
+    "llvm.hexagon.V6.vminuh.128B" => "__builtin_HEXAGON_V6_vminuh_128B",
+    "llvm.hexagon.V6.vminw" => "__builtin_HEXAGON_V6_vminw",
+    "llvm.hexagon.V6.vminw.128B" => "__builtin_HEXAGON_V6_vminw_128B",
+    "llvm.hexagon.V6.vmpabus" => "__builtin_HEXAGON_V6_vmpabus",
+    "llvm.hexagon.V6.vmpabus.128B" => "__builtin_HEXAGON_V6_vmpabus_128B",
+    "llvm.hexagon.V6.vmpabus.acc" => "__builtin_HEXAGON_V6_vmpabus_acc",
+    "llvm.hexagon.V6.vmpabus.acc.128B" => "__builtin_HEXAGON_V6_vmpabus_acc_128B",
+    "llvm.hexagon.V6.vmpabusv" => "__builtin_HEXAGON_V6_vmpabusv",
+    "llvm.hexagon.V6.vmpabusv.128B" => "__builtin_HEXAGON_V6_vmpabusv_128B",
+    "llvm.hexagon.V6.vmpabuu" => "__builtin_HEXAGON_V6_vmpabuu",
+    "llvm.hexagon.V6.vmpabuu.128B" => "__builtin_HEXAGON_V6_vmpabuu_128B",
+    "llvm.hexagon.V6.vmpabuu.acc" => "__builtin_HEXAGON_V6_vmpabuu_acc",
+    "llvm.hexagon.V6.vmpabuu.acc.128B" => "__builtin_HEXAGON_V6_vmpabuu_acc_128B",
+    "llvm.hexagon.V6.vmpabuuv" => "__builtin_HEXAGON_V6_vmpabuuv",
+    "llvm.hexagon.V6.vmpabuuv.128B" => "__builtin_HEXAGON_V6_vmpabuuv_128B",
+    "llvm.hexagon.V6.vmpahb" => "__builtin_HEXAGON_V6_vmpahb",
+    "llvm.hexagon.V6.vmpahb.128B" => "__builtin_HEXAGON_V6_vmpahb_128B",
+    "llvm.hexagon.V6.vmpahb.acc" => "__builtin_HEXAGON_V6_vmpahb_acc",
+    "llvm.hexagon.V6.vmpahb.acc.128B" => "__builtin_HEXAGON_V6_vmpahb_acc_128B",
+    "llvm.hexagon.V6.vmpahhsat" => "__builtin_HEXAGON_V6_vmpahhsat",
+    "llvm.hexagon.V6.vmpahhsat.128B" => "__builtin_HEXAGON_V6_vmpahhsat_128B",
+    "llvm.hexagon.V6.vmpauhb" => "__builtin_HEXAGON_V6_vmpauhb",
+    "llvm.hexagon.V6.vmpauhb.128B" => "__builtin_HEXAGON_V6_vmpauhb_128B",
+    "llvm.hexagon.V6.vmpauhb.acc" => "__builtin_HEXAGON_V6_vmpauhb_acc",
+    "llvm.hexagon.V6.vmpauhb.acc.128B" => "__builtin_HEXAGON_V6_vmpauhb_acc_128B",
+    "llvm.hexagon.V6.vmpauhuhsat" => "__builtin_HEXAGON_V6_vmpauhuhsat",
+    "llvm.hexagon.V6.vmpauhuhsat.128B" => "__builtin_HEXAGON_V6_vmpauhuhsat_128B",
+    "llvm.hexagon.V6.vmpsuhuhsat" => "__builtin_HEXAGON_V6_vmpsuhuhsat",
+    "llvm.hexagon.V6.vmpsuhuhsat.128B" => "__builtin_HEXAGON_V6_vmpsuhuhsat_128B",
+    "llvm.hexagon.V6.vmpy.hf.hf" => "__builtin_HEXAGON_V6_vmpy_hf_hf",
+    "llvm.hexagon.V6.vmpy.hf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_128B",
+    "llvm.hexagon.V6.vmpy.hf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc",
+    "llvm.hexagon.V6.vmpy.hf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc_128B",
+    "llvm.hexagon.V6.vmpy.qf16" => "__builtin_HEXAGON_V6_vmpy_qf16",
+    "llvm.hexagon.V6.vmpy.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_128B",
+    "llvm.hexagon.V6.vmpy.qf16.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_hf",
+    "llvm.hexagon.V6.vmpy.qf16.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_hf_128B",
+    "llvm.hexagon.V6.vmpy.qf16.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf",
+    "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf_128B",
+    "llvm.hexagon.V6.vmpy.qf32" => "__builtin_HEXAGON_V6_vmpy_qf32",
+    "llvm.hexagon.V6.vmpy.qf32.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_128B",
+    "llvm.hexagon.V6.vmpy.qf32.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_hf",
+    "llvm.hexagon.V6.vmpy.qf32.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_hf_128B",
+    "llvm.hexagon.V6.vmpy.qf32.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf",
+    "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf_128B",
+    "llvm.hexagon.V6.vmpy.qf32.qf16" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16",
+    "llvm.hexagon.V6.vmpy.qf32.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16_128B",
+    "llvm.hexagon.V6.vmpy.qf32.sf" => "__builtin_HEXAGON_V6_vmpy_qf32_sf",
+    "llvm.hexagon.V6.vmpy.qf32.sf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_sf_128B",
+    "llvm.hexagon.V6.vmpy.sf.bf" => "__builtin_HEXAGON_V6_vmpy_sf_bf",
+    "llvm.hexagon.V6.vmpy.sf.bf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_128B",
+    "llvm.hexagon.V6.vmpy.sf.bf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc",
+    "llvm.hexagon.V6.vmpy.sf.bf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc_128B",
+    "llvm.hexagon.V6.vmpy.sf.hf" => "__builtin_HEXAGON_V6_vmpy_sf_hf",
+    "llvm.hexagon.V6.vmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_128B",
+    "llvm.hexagon.V6.vmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc",
+    "llvm.hexagon.V6.vmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc_128B",
+    "llvm.hexagon.V6.vmpy.sf.sf" => "__builtin_HEXAGON_V6_vmpy_sf_sf",
+    "llvm.hexagon.V6.vmpy.sf.sf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_sf_128B",
+    "llvm.hexagon.V6.vmpybus" => "__builtin_HEXAGON_V6_vmpybus",
+    "llvm.hexagon.V6.vmpybus.128B" => "__builtin_HEXAGON_V6_vmpybus_128B",
+    "llvm.hexagon.V6.vmpybus.acc" => "__builtin_HEXAGON_V6_vmpybus_acc",
+    "llvm.hexagon.V6.vmpybus.acc.128B" => "__builtin_HEXAGON_V6_vmpybus_acc_128B",
+    "llvm.hexagon.V6.vmpybusv" => "__builtin_HEXAGON_V6_vmpybusv",
+    "llvm.hexagon.V6.vmpybusv.128B" => "__builtin_HEXAGON_V6_vmpybusv_128B",
+    "llvm.hexagon.V6.vmpybusv.acc" => "__builtin_HEXAGON_V6_vmpybusv_acc",
+    "llvm.hexagon.V6.vmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vmpybusv_acc_128B",
+    "llvm.hexagon.V6.vmpybv" => "__builtin_HEXAGON_V6_vmpybv",
+    "llvm.hexagon.V6.vmpybv.128B" => "__builtin_HEXAGON_V6_vmpybv_128B",
+    "llvm.hexagon.V6.vmpybv.acc" => "__builtin_HEXAGON_V6_vmpybv_acc",
+    "llvm.hexagon.V6.vmpybv.acc.128B" => "__builtin_HEXAGON_V6_vmpybv_acc_128B",
+    "llvm.hexagon.V6.vmpyewuh" => "__builtin_HEXAGON_V6_vmpyewuh",
+    "llvm.hexagon.V6.vmpyewuh.128B" => "__builtin_HEXAGON_V6_vmpyewuh_128B",
+    "llvm.hexagon.V6.vmpyewuh.64" => "__builtin_HEXAGON_V6_vmpyewuh_64",
+    "llvm.hexagon.V6.vmpyewuh.64.128B" => "__builtin_HEXAGON_V6_vmpyewuh_64_128B",
+    "llvm.hexagon.V6.vmpyh" => "__builtin_HEXAGON_V6_vmpyh",
+    "llvm.hexagon.V6.vmpyh.128B" => "__builtin_HEXAGON_V6_vmpyh_128B",
+    "llvm.hexagon.V6.vmpyh.acc" => "__builtin_HEXAGON_V6_vmpyh_acc",
+    "llvm.hexagon.V6.vmpyh.acc.128B" => "__builtin_HEXAGON_V6_vmpyh_acc_128B",
+    "llvm.hexagon.V6.vmpyhsat.acc" => "__builtin_HEXAGON_V6_vmpyhsat_acc",
+    "llvm.hexagon.V6.vmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vmpyhsat_acc_128B",
+    "llvm.hexagon.V6.vmpyhsrs" => "__builtin_HEXAGON_V6_vmpyhsrs",
+    "llvm.hexagon.V6.vmpyhsrs.128B" => "__builtin_HEXAGON_V6_vmpyhsrs_128B",
+    "llvm.hexagon.V6.vmpyhss" => "__builtin_HEXAGON_V6_vmpyhss",
+    "llvm.hexagon.V6.vmpyhss.128B" => "__builtin_HEXAGON_V6_vmpyhss_128B",
+    "llvm.hexagon.V6.vmpyhus" => "__builtin_HEXAGON_V6_vmpyhus",
+    "llvm.hexagon.V6.vmpyhus.128B" => "__builtin_HEXAGON_V6_vmpyhus_128B",
+    "llvm.hexagon.V6.vmpyhus.acc" => "__builtin_HEXAGON_V6_vmpyhus_acc",
+    "llvm.hexagon.V6.vmpyhus.acc.128B" => "__builtin_HEXAGON_V6_vmpyhus_acc_128B",
+    "llvm.hexagon.V6.vmpyhv" => "__builtin_HEXAGON_V6_vmpyhv",
+    "llvm.hexagon.V6.vmpyhv.128B" => "__builtin_HEXAGON_V6_vmpyhv_128B",
+    "llvm.hexagon.V6.vmpyhv.acc" => "__builtin_HEXAGON_V6_vmpyhv_acc",
+    "llvm.hexagon.V6.vmpyhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyhv_acc_128B",
+    "llvm.hexagon.V6.vmpyhvsrs" => "__builtin_HEXAGON_V6_vmpyhvsrs",
+    "llvm.hexagon.V6.vmpyhvsrs.128B" => "__builtin_HEXAGON_V6_vmpyhvsrs_128B",
+    "llvm.hexagon.V6.vmpyieoh" => "__builtin_HEXAGON_V6_vmpyieoh",
+    "llvm.hexagon.V6.vmpyieoh.128B" => "__builtin_HEXAGON_V6_vmpyieoh_128B",
+    "llvm.hexagon.V6.vmpyiewh.acc" => "__builtin_HEXAGON_V6_vmpyiewh_acc",
+    "llvm.hexagon.V6.vmpyiewh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewh_acc_128B",
+    "llvm.hexagon.V6.vmpyiewuh" => "__builtin_HEXAGON_V6_vmpyiewuh",
+    "llvm.hexagon.V6.vmpyiewuh.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_128B",
+    "llvm.hexagon.V6.vmpyiewuh.acc" => "__builtin_HEXAGON_V6_vmpyiewuh_acc",
+    "llvm.hexagon.V6.vmpyiewuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_acc_128B",
+    "llvm.hexagon.V6.vmpyih" => "__builtin_HEXAGON_V6_vmpyih",
+    "llvm.hexagon.V6.vmpyih.128B" => "__builtin_HEXAGON_V6_vmpyih_128B",
+    "llvm.hexagon.V6.vmpyih.acc" => "__builtin_HEXAGON_V6_vmpyih_acc",
+    "llvm.hexagon.V6.vmpyih.acc.128B" => "__builtin_HEXAGON_V6_vmpyih_acc_128B",
+    "llvm.hexagon.V6.vmpyihb" => "__builtin_HEXAGON_V6_vmpyihb",
+    "llvm.hexagon.V6.vmpyihb.128B" => "__builtin_HEXAGON_V6_vmpyihb_128B",
+    "llvm.hexagon.V6.vmpyihb.acc" => "__builtin_HEXAGON_V6_vmpyihb_acc",
+    "llvm.hexagon.V6.vmpyihb.acc.128B" => "__builtin_HEXAGON_V6_vmpyihb_acc_128B",
+    "llvm.hexagon.V6.vmpyiowh" => "__builtin_HEXAGON_V6_vmpyiowh",
+    "llvm.hexagon.V6.vmpyiowh.128B" => "__builtin_HEXAGON_V6_vmpyiowh_128B",
+    "llvm.hexagon.V6.vmpyiwb" => "__builtin_HEXAGON_V6_vmpyiwb",
+    "llvm.hexagon.V6.vmpyiwb.128B" => "__builtin_HEXAGON_V6_vmpyiwb_128B",
+    "llvm.hexagon.V6.vmpyiwb.acc" => "__builtin_HEXAGON_V6_vmpyiwb_acc",
+    "llvm.hexagon.V6.vmpyiwb.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwb_acc_128B",
+    "llvm.hexagon.V6.vmpyiwh" => "__builtin_HEXAGON_V6_vmpyiwh",
+    "llvm.hexagon.V6.vmpyiwh.128B" => "__builtin_HEXAGON_V6_vmpyiwh_128B",
+    "llvm.hexagon.V6.vmpyiwh.acc" => "__builtin_HEXAGON_V6_vmpyiwh_acc",
+    "llvm.hexagon.V6.vmpyiwh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwh_acc_128B",
+    "llvm.hexagon.V6.vmpyiwub" => "__builtin_HEXAGON_V6_vmpyiwub",
+    "llvm.hexagon.V6.vmpyiwub.128B" => "__builtin_HEXAGON_V6_vmpyiwub_128B",
+    "llvm.hexagon.V6.vmpyiwub.acc" => "__builtin_HEXAGON_V6_vmpyiwub_acc",
+    "llvm.hexagon.V6.vmpyiwub.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwub_acc_128B",
+    "llvm.hexagon.V6.vmpyowh" => "__builtin_HEXAGON_V6_vmpyowh",
+    "llvm.hexagon.V6.vmpyowh.128B" => "__builtin_HEXAGON_V6_vmpyowh_128B",
+    "llvm.hexagon.V6.vmpyowh.64.acc" => "__builtin_HEXAGON_V6_vmpyowh_64_acc",
+    "llvm.hexagon.V6.vmpyowh.64.acc.128B" => "__builtin_HEXAGON_V6_vmpyowh_64_acc_128B",
+    "llvm.hexagon.V6.vmpyowh.rnd" => "__builtin_HEXAGON_V6_vmpyowh_rnd",
+    "llvm.hexagon.V6.vmpyowh.rnd.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_128B",
+    "llvm.hexagon.V6.vmpyowh.rnd.sacc" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc",
+    "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B",
+    "llvm.hexagon.V6.vmpyowh.sacc" => "__builtin_HEXAGON_V6_vmpyowh_sacc",
+    "llvm.hexagon.V6.vmpyowh.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_sacc_128B",
+    "llvm.hexagon.V6.vmpyub" => "__builtin_HEXAGON_V6_vmpyub",
+    "llvm.hexagon.V6.vmpyub.128B" => "__builtin_HEXAGON_V6_vmpyub_128B",
+    "llvm.hexagon.V6.vmpyub.acc" => "__builtin_HEXAGON_V6_vmpyub_acc",
+    "llvm.hexagon.V6.vmpyub.acc.128B" => "__builtin_HEXAGON_V6_vmpyub_acc_128B",
+    "llvm.hexagon.V6.vmpyubv" => "__builtin_HEXAGON_V6_vmpyubv",
+    "llvm.hexagon.V6.vmpyubv.128B" => "__builtin_HEXAGON_V6_vmpyubv_128B",
+    "llvm.hexagon.V6.vmpyubv.acc" => "__builtin_HEXAGON_V6_vmpyubv_acc",
+    "llvm.hexagon.V6.vmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vmpyubv_acc_128B",
+    "llvm.hexagon.V6.vmpyuh" => "__builtin_HEXAGON_V6_vmpyuh",
+    "llvm.hexagon.V6.vmpyuh.128B" => "__builtin_HEXAGON_V6_vmpyuh_128B",
+    "llvm.hexagon.V6.vmpyuh.acc" => "__builtin_HEXAGON_V6_vmpyuh_acc",
+    "llvm.hexagon.V6.vmpyuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyuh_acc_128B",
+    "llvm.hexagon.V6.vmpyuhe" => "__builtin_HEXAGON_V6_vmpyuhe",
+    "llvm.hexagon.V6.vmpyuhe.128B" => "__builtin_HEXAGON_V6_vmpyuhe_128B",
+    "llvm.hexagon.V6.vmpyuhe.acc" => "__builtin_HEXAGON_V6_vmpyuhe_acc",
+    "llvm.hexagon.V6.vmpyuhe.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhe_acc_128B",
+    "llvm.hexagon.V6.vmpyuhv" => "__builtin_HEXAGON_V6_vmpyuhv",
+    "llvm.hexagon.V6.vmpyuhv.128B" => "__builtin_HEXAGON_V6_vmpyuhv_128B",
+    "llvm.hexagon.V6.vmpyuhv.acc" => "__builtin_HEXAGON_V6_vmpyuhv_acc",
+    "llvm.hexagon.V6.vmpyuhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhv_acc_128B",
+    "llvm.hexagon.V6.vmpyuhvs" => "__builtin_HEXAGON_V6_vmpyuhvs",
+    "llvm.hexagon.V6.vmpyuhvs.128B" => "__builtin_HEXAGON_V6_vmpyuhvs_128B",
+    "llvm.hexagon.V6.vmux" => "__builtin_HEXAGON_V6_vmux",
+    "llvm.hexagon.V6.vmux.128B" => "__builtin_HEXAGON_V6_vmux_128B",
+    "llvm.hexagon.V6.vnavgb" => "__builtin_HEXAGON_V6_vnavgb",
+    "llvm.hexagon.V6.vnavgb.128B" => "__builtin_HEXAGON_V6_vnavgb_128B",
+    "llvm.hexagon.V6.vnavgh" => "__builtin_HEXAGON_V6_vnavgh",
+    "llvm.hexagon.V6.vnavgh.128B" => "__builtin_HEXAGON_V6_vnavgh_128B",
+    "llvm.hexagon.V6.vnavgub" => "__builtin_HEXAGON_V6_vnavgub",
+    "llvm.hexagon.V6.vnavgub.128B" => "__builtin_HEXAGON_V6_vnavgub_128B",
+    "llvm.hexagon.V6.vnavgw" => "__builtin_HEXAGON_V6_vnavgw",
+    "llvm.hexagon.V6.vnavgw.128B" => "__builtin_HEXAGON_V6_vnavgw_128B",
+    "llvm.hexagon.V6.vnormamth" => "__builtin_HEXAGON_V6_vnormamth",
+    "llvm.hexagon.V6.vnormamth.128B" => "__builtin_HEXAGON_V6_vnormamth_128B",
+    "llvm.hexagon.V6.vnormamtw" => "__builtin_HEXAGON_V6_vnormamtw",
+    "llvm.hexagon.V6.vnormamtw.128B" => "__builtin_HEXAGON_V6_vnormamtw_128B",
+    "llvm.hexagon.V6.vnot" => "__builtin_HEXAGON_V6_vnot",
+    "llvm.hexagon.V6.vnot.128B" => "__builtin_HEXAGON_V6_vnot_128B",
+    "llvm.hexagon.V6.vor" => "__builtin_HEXAGON_V6_vor",
+    "llvm.hexagon.V6.vor.128B" => "__builtin_HEXAGON_V6_vor_128B",
+    "llvm.hexagon.V6.vpackeb" => "__builtin_HEXAGON_V6_vpackeb",
+    "llvm.hexagon.V6.vpackeb.128B" => "__builtin_HEXAGON_V6_vpackeb_128B",
+    "llvm.hexagon.V6.vpackeh" => "__builtin_HEXAGON_V6_vpackeh",
+    "llvm.hexagon.V6.vpackeh.128B" => "__builtin_HEXAGON_V6_vpackeh_128B",
+    "llvm.hexagon.V6.vpackhb.sat" => "__builtin_HEXAGON_V6_vpackhb_sat",
+    "llvm.hexagon.V6.vpackhb.sat.128B" => "__builtin_HEXAGON_V6_vpackhb_sat_128B",
+    "llvm.hexagon.V6.vpackhub.sat" => "__builtin_HEXAGON_V6_vpackhub_sat",
+    "llvm.hexagon.V6.vpackhub.sat.128B" => "__builtin_HEXAGON_V6_vpackhub_sat_128B",
+    "llvm.hexagon.V6.vpackob" => "__builtin_HEXAGON_V6_vpackob",
+    "llvm.hexagon.V6.vpackob.128B" => "__builtin_HEXAGON_V6_vpackob_128B",
+    "llvm.hexagon.V6.vpackoh" => "__builtin_HEXAGON_V6_vpackoh",
+    "llvm.hexagon.V6.vpackoh.128B" => "__builtin_HEXAGON_V6_vpackoh_128B",
+    "llvm.hexagon.V6.vpackwh.sat" => "__builtin_HEXAGON_V6_vpackwh_sat",
+    "llvm.hexagon.V6.vpackwh.sat.128B" => "__builtin_HEXAGON_V6_vpackwh_sat_128B",
+    "llvm.hexagon.V6.vpackwuh.sat" => "__builtin_HEXAGON_V6_vpackwuh_sat",
+    "llvm.hexagon.V6.vpackwuh.sat.128B" => "__builtin_HEXAGON_V6_vpackwuh_sat_128B",
+    "llvm.hexagon.V6.vpopcounth" => "__builtin_HEXAGON_V6_vpopcounth",
+    "llvm.hexagon.V6.vpopcounth.128B" => "__builtin_HEXAGON_V6_vpopcounth_128B",
+    "llvm.hexagon.V6.vprefixqb" => "__builtin_HEXAGON_V6_vprefixqb",
+    "llvm.hexagon.V6.vprefixqb.128B" => "__builtin_HEXAGON_V6_vprefixqb_128B",
+    "llvm.hexagon.V6.vprefixqh" => "__builtin_HEXAGON_V6_vprefixqh",
+    "llvm.hexagon.V6.vprefixqh.128B" => "__builtin_HEXAGON_V6_vprefixqh_128B",
+    "llvm.hexagon.V6.vprefixqw" => "__builtin_HEXAGON_V6_vprefixqw",
+    "llvm.hexagon.V6.vprefixqw.128B" => "__builtin_HEXAGON_V6_vprefixqw_128B",
+    "llvm.hexagon.V6.vrdelta" => "__builtin_HEXAGON_V6_vrdelta",
+    "llvm.hexagon.V6.vrdelta.128B" => "__builtin_HEXAGON_V6_vrdelta_128B",
+    "llvm.hexagon.V6.vrmpybub.rtt" => "__builtin_HEXAGON_V6_vrmpybub_rtt",
+    "llvm.hexagon.V6.vrmpybub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_128B",
+    "llvm.hexagon.V6.vrmpybub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc",
+    "llvm.hexagon.V6.vrmpybub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B",
+    "llvm.hexagon.V6.vrmpybus" => "__builtin_HEXAGON_V6_vrmpybus",
+    "llvm.hexagon.V6.vrmpybus.128B" => "__builtin_HEXAGON_V6_vrmpybus_128B",
+    "llvm.hexagon.V6.vrmpybus.acc" => "__builtin_HEXAGON_V6_vrmpybus_acc",
+    "llvm.hexagon.V6.vrmpybus.acc.128B" => "__builtin_HEXAGON_V6_vrmpybus_acc_128B",
+    "llvm.hexagon.V6.vrmpybusi" => "__builtin_HEXAGON_V6_vrmpybusi",
+    "llvm.hexagon.V6.vrmpybusi.128B" => "__builtin_HEXAGON_V6_vrmpybusi_128B",
+    "llvm.hexagon.V6.vrmpybusi.acc" => "__builtin_HEXAGON_V6_vrmpybusi_acc",
+    "llvm.hexagon.V6.vrmpybusi.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusi_acc_128B",
+    "llvm.hexagon.V6.vrmpybusv" => "__builtin_HEXAGON_V6_vrmpybusv",
+    "llvm.hexagon.V6.vrmpybusv.128B" => "__builtin_HEXAGON_V6_vrmpybusv_128B",
+    "llvm.hexagon.V6.vrmpybusv.acc" => "__builtin_HEXAGON_V6_vrmpybusv_acc",
+    "llvm.hexagon.V6.vrmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusv_acc_128B",
+    "llvm.hexagon.V6.vrmpybv" => "__builtin_HEXAGON_V6_vrmpybv",
+    "llvm.hexagon.V6.vrmpybv.128B" => "__builtin_HEXAGON_V6_vrmpybv_128B",
+    "llvm.hexagon.V6.vrmpybv.acc" => "__builtin_HEXAGON_V6_vrmpybv_acc",
+    "llvm.hexagon.V6.vrmpybv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybv_acc_128B",
+    "llvm.hexagon.V6.vrmpyub" => "__builtin_HEXAGON_V6_vrmpyub",
+    "llvm.hexagon.V6.vrmpyub.128B" => "__builtin_HEXAGON_V6_vrmpyub_128B",
+    "llvm.hexagon.V6.vrmpyub.acc" => "__builtin_HEXAGON_V6_vrmpyub_acc",
+    "llvm.hexagon.V6.vrmpyub.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_acc_128B",
+    "llvm.hexagon.V6.vrmpyub.rtt" => "__builtin_HEXAGON_V6_vrmpyub_rtt",
+    "llvm.hexagon.V6.vrmpyub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_128B",
+    "llvm.hexagon.V6.vrmpyub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc",
+    "llvm.hexagon.V6.vrmpyub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B",
+    "llvm.hexagon.V6.vrmpyubi" => "__builtin_HEXAGON_V6_vrmpyubi",
+    "llvm.hexagon.V6.vrmpyubi.128B" => "__builtin_HEXAGON_V6_vrmpyubi_128B",
+    "llvm.hexagon.V6.vrmpyubi.acc" => "__builtin_HEXAGON_V6_vrmpyubi_acc",
+    "llvm.hexagon.V6.vrmpyubi.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubi_acc_128B",
+    "llvm.hexagon.V6.vrmpyubv" => "__builtin_HEXAGON_V6_vrmpyubv",
+    "llvm.hexagon.V6.vrmpyubv.128B" => "__builtin_HEXAGON_V6_vrmpyubv_128B",
+    "llvm.hexagon.V6.vrmpyubv.acc" => "__builtin_HEXAGON_V6_vrmpyubv_acc",
+    "llvm.hexagon.V6.vrmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubv_acc_128B",
+    "llvm.hexagon.V6.vror" => "__builtin_HEXAGON_V6_vror",
+    "llvm.hexagon.V6.vror.128B" => "__builtin_HEXAGON_V6_vror_128B",
+    "llvm.hexagon.V6.vrotr" => "__builtin_HEXAGON_V6_vrotr",
+    "llvm.hexagon.V6.vrotr.128B" => "__builtin_HEXAGON_V6_vrotr_128B",
+    "llvm.hexagon.V6.vroundhb" => "__builtin_HEXAGON_V6_vroundhb",
+    "llvm.hexagon.V6.vroundhb.128B" => "__builtin_HEXAGON_V6_vroundhb_128B",
+    "llvm.hexagon.V6.vroundhub" => "__builtin_HEXAGON_V6_vroundhub",
+    "llvm.hexagon.V6.vroundhub.128B" => "__builtin_HEXAGON_V6_vroundhub_128B",
+    "llvm.hexagon.V6.vrounduhub" => "__builtin_HEXAGON_V6_vrounduhub",
+    "llvm.hexagon.V6.vrounduhub.128B" => "__builtin_HEXAGON_V6_vrounduhub_128B",
+    "llvm.hexagon.V6.vrounduwuh" => "__builtin_HEXAGON_V6_vrounduwuh",
+    "llvm.hexagon.V6.vrounduwuh.128B" => "__builtin_HEXAGON_V6_vrounduwuh_128B",
+    "llvm.hexagon.V6.vroundwh" => "__builtin_HEXAGON_V6_vroundwh",
+    "llvm.hexagon.V6.vroundwh.128B" => "__builtin_HEXAGON_V6_vroundwh_128B",
+    "llvm.hexagon.V6.vroundwuh" => "__builtin_HEXAGON_V6_vroundwuh",
+    "llvm.hexagon.V6.vroundwuh.128B" => "__builtin_HEXAGON_V6_vroundwuh_128B",
+    "llvm.hexagon.V6.vrsadubi" => "__builtin_HEXAGON_V6_vrsadubi",
+    "llvm.hexagon.V6.vrsadubi.128B" => "__builtin_HEXAGON_V6_vrsadubi_128B",
+    "llvm.hexagon.V6.vrsadubi.acc" => "__builtin_HEXAGON_V6_vrsadubi_acc",
+    "llvm.hexagon.V6.vrsadubi.acc.128B" => "__builtin_HEXAGON_V6_vrsadubi_acc_128B",
+    "llvm.hexagon.V6.vsatdw" => "__builtin_HEXAGON_V6_vsatdw",
+    "llvm.hexagon.V6.vsatdw.128B" => "__builtin_HEXAGON_V6_vsatdw_128B",
+    "llvm.hexagon.V6.vsathub" => "__builtin_HEXAGON_V6_vsathub",
+    "llvm.hexagon.V6.vsathub.128B" => "__builtin_HEXAGON_V6_vsathub_128B",
+    "llvm.hexagon.V6.vsatuwuh" => "__builtin_HEXAGON_V6_vsatuwuh",
+    "llvm.hexagon.V6.vsatuwuh.128B" => "__builtin_HEXAGON_V6_vsatuwuh_128B",
+    "llvm.hexagon.V6.vsatwh" => "__builtin_HEXAGON_V6_vsatwh",
+    "llvm.hexagon.V6.vsatwh.128B" => "__builtin_HEXAGON_V6_vsatwh_128B",
+    "llvm.hexagon.V6.vsb" => "__builtin_HEXAGON_V6_vsb",
+    "llvm.hexagon.V6.vsb.128B" => "__builtin_HEXAGON_V6_vsb_128B",
+    "llvm.hexagon.V6.vscattermh" => "__builtin_HEXAGON_V6_vscattermh",
+    "llvm.hexagon.V6.vscattermh.128B" => "__builtin_HEXAGON_V6_vscattermh_128B",
+    "llvm.hexagon.V6.vscattermh.add" => "__builtin_HEXAGON_V6_vscattermh_add",
+    "llvm.hexagon.V6.vscattermh.add.128B" => "__builtin_HEXAGON_V6_vscattermh_add_128B",
+    "llvm.hexagon.V6.vscattermhq" => "__builtin_HEXAGON_V6_vscattermhq",
+    "llvm.hexagon.V6.vscattermhq.128B" => "__builtin_HEXAGON_V6_vscattermhq_128B",
+    "llvm.hexagon.V6.vscattermhw" => "__builtin_HEXAGON_V6_vscattermhw",
+    "llvm.hexagon.V6.vscattermhw.128B" => "__builtin_HEXAGON_V6_vscattermhw_128B",
+    "llvm.hexagon.V6.vscattermhw.add" => "__builtin_HEXAGON_V6_vscattermhw_add",
+    "llvm.hexagon.V6.vscattermhw.add.128B" => "__builtin_HEXAGON_V6_vscattermhw_add_128B",
+    "llvm.hexagon.V6.vscattermhwq" => "__builtin_HEXAGON_V6_vscattermhwq",
+    "llvm.hexagon.V6.vscattermhwq.128B" => "__builtin_HEXAGON_V6_vscattermhwq_128B",
+    "llvm.hexagon.V6.vscattermw" => "__builtin_HEXAGON_V6_vscattermw",
+    "llvm.hexagon.V6.vscattermw.128B" => "__builtin_HEXAGON_V6_vscattermw_128B",
+    "llvm.hexagon.V6.vscattermw.add" => "__builtin_HEXAGON_V6_vscattermw_add",
+    "llvm.hexagon.V6.vscattermw.add.128B" => "__builtin_HEXAGON_V6_vscattermw_add_128B",
+    "llvm.hexagon.V6.vscattermwq" => "__builtin_HEXAGON_V6_vscattermwq",
+    "llvm.hexagon.V6.vscattermwq.128B" => "__builtin_HEXAGON_V6_vscattermwq_128B",
+    "llvm.hexagon.V6.vsh" => "__builtin_HEXAGON_V6_vsh",
+    "llvm.hexagon.V6.vsh.128B" => "__builtin_HEXAGON_V6_vsh_128B",
+    "llvm.hexagon.V6.vshufeh" => "__builtin_HEXAGON_V6_vshufeh",
+    "llvm.hexagon.V6.vshufeh.128B" => "__builtin_HEXAGON_V6_vshufeh_128B",
+    "llvm.hexagon.V6.vshuffb" => "__builtin_HEXAGON_V6_vshuffb",
+    "llvm.hexagon.V6.vshuffb.128B" => "__builtin_HEXAGON_V6_vshuffb_128B",
+    "llvm.hexagon.V6.vshuffeb" => "__builtin_HEXAGON_V6_vshuffeb",
+    "llvm.hexagon.V6.vshuffeb.128B" => "__builtin_HEXAGON_V6_vshuffeb_128B",
+    "llvm.hexagon.V6.vshuffh" => "__builtin_HEXAGON_V6_vshuffh",
+    "llvm.hexagon.V6.vshuffh.128B" => "__builtin_HEXAGON_V6_vshuffh_128B",
+    "llvm.hexagon.V6.vshuffob" => "__builtin_HEXAGON_V6_vshuffob",
+    "llvm.hexagon.V6.vshuffob.128B" => "__builtin_HEXAGON_V6_vshuffob_128B",
+    "llvm.hexagon.V6.vshuffvdd" => "__builtin_HEXAGON_V6_vshuffvdd",
+    "llvm.hexagon.V6.vshuffvdd.128B" => "__builtin_HEXAGON_V6_vshuffvdd_128B",
+    "llvm.hexagon.V6.vshufoeb" => "__builtin_HEXAGON_V6_vshufoeb",
+    "llvm.hexagon.V6.vshufoeb.128B" => "__builtin_HEXAGON_V6_vshufoeb_128B",
+    "llvm.hexagon.V6.vshufoeh" => "__builtin_HEXAGON_V6_vshufoeh",
+    "llvm.hexagon.V6.vshufoeh.128B" => "__builtin_HEXAGON_V6_vshufoeh_128B",
+    "llvm.hexagon.V6.vshufoh" => "__builtin_HEXAGON_V6_vshufoh",
+    "llvm.hexagon.V6.vshufoh.128B" => "__builtin_HEXAGON_V6_vshufoh_128B",
+    "llvm.hexagon.V6.vsub.hf" => "__builtin_HEXAGON_V6_vsub_hf",
+    "llvm.hexagon.V6.vsub.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_128B",
+    "llvm.hexagon.V6.vsub.hf.hf" => "__builtin_HEXAGON_V6_vsub_hf_hf",
+    "llvm.hexagon.V6.vsub.hf.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_hf_128B",
+    "llvm.hexagon.V6.vsub.qf16" => "__builtin_HEXAGON_V6_vsub_qf16",
+    "llvm.hexagon.V6.vsub.qf16.128B" => "__builtin_HEXAGON_V6_vsub_qf16_128B",
+    "llvm.hexagon.V6.vsub.qf16.mix" => "__builtin_HEXAGON_V6_vsub_qf16_mix",
+    "llvm.hexagon.V6.vsub.qf16.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf16_mix_128B",
+    "llvm.hexagon.V6.vsub.qf32" => "__builtin_HEXAGON_V6_vsub_qf32",
+    "llvm.hexagon.V6.vsub.qf32.128B" => "__builtin_HEXAGON_V6_vsub_qf32_128B",
+    "llvm.hexagon.V6.vsub.qf32.mix" => "__builtin_HEXAGON_V6_vsub_qf32_mix",
+    "llvm.hexagon.V6.vsub.qf32.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf32_mix_128B",
+    "llvm.hexagon.V6.vsub.sf" => "__builtin_HEXAGON_V6_vsub_sf",
+    "llvm.hexagon.V6.vsub.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_128B",
+    "llvm.hexagon.V6.vsub.sf.bf" => "__builtin_HEXAGON_V6_vsub_sf_bf",
+    "llvm.hexagon.V6.vsub.sf.bf.128B" => "__builtin_HEXAGON_V6_vsub_sf_bf_128B",
+    "llvm.hexagon.V6.vsub.sf.hf" => "__builtin_HEXAGON_V6_vsub_sf_hf",
+    "llvm.hexagon.V6.vsub.sf.hf.128B" => "__builtin_HEXAGON_V6_vsub_sf_hf_128B",
+    "llvm.hexagon.V6.vsub.sf.sf" => "__builtin_HEXAGON_V6_vsub_sf_sf",
+    "llvm.hexagon.V6.vsub.sf.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_sf_128B",
+    "llvm.hexagon.V6.vsubb" => "__builtin_HEXAGON_V6_vsubb",
+    "llvm.hexagon.V6.vsubb.128B" => "__builtin_HEXAGON_V6_vsubb_128B",
+    "llvm.hexagon.V6.vsubb.dv" => "__builtin_HEXAGON_V6_vsubb_dv",
+    "llvm.hexagon.V6.vsubb.dv.128B" => "__builtin_HEXAGON_V6_vsubb_dv_128B",
+    "llvm.hexagon.V6.vsubbnq" => "__builtin_HEXAGON_V6_vsubbnq",
+    "llvm.hexagon.V6.vsubbnq.128B" => "__builtin_HEXAGON_V6_vsubbnq_128B",
+    "llvm.hexagon.V6.vsubbq" => "__builtin_HEXAGON_V6_vsubbq",
+    "llvm.hexagon.V6.vsubbq.128B" => "__builtin_HEXAGON_V6_vsubbq_128B",
+    "llvm.hexagon.V6.vsubbsat" => "__builtin_HEXAGON_V6_vsubbsat",
+    "llvm.hexagon.V6.vsubbsat.128B" => "__builtin_HEXAGON_V6_vsubbsat_128B",
+    "llvm.hexagon.V6.vsubbsat.dv" => "__builtin_HEXAGON_V6_vsubbsat_dv",
+    "llvm.hexagon.V6.vsubbsat.dv.128B" => "__builtin_HEXAGON_V6_vsubbsat_dv_128B",
+    "llvm.hexagon.V6.vsubh" => "__builtin_HEXAGON_V6_vsubh",
+    "llvm.hexagon.V6.vsubh.128B" => "__builtin_HEXAGON_V6_vsubh_128B",
+    "llvm.hexagon.V6.vsubh.dv" => "__builtin_HEXAGON_V6_vsubh_dv",
+    "llvm.hexagon.V6.vsubh.dv.128B" => "__builtin_HEXAGON_V6_vsubh_dv_128B",
+    "llvm.hexagon.V6.vsubhnq" => "__builtin_HEXAGON_V6_vsubhnq",
+    "llvm.hexagon.V6.vsubhnq.128B" => "__builtin_HEXAGON_V6_vsubhnq_128B",
+    "llvm.hexagon.V6.vsubhq" => "__builtin_HEXAGON_V6_vsubhq",
+    "llvm.hexagon.V6.vsubhq.128B" => "__builtin_HEXAGON_V6_vsubhq_128B",
+    "llvm.hexagon.V6.vsubhsat" => "__builtin_HEXAGON_V6_vsubhsat",
+    "llvm.hexagon.V6.vsubhsat.128B" => "__builtin_HEXAGON_V6_vsubhsat_128B",
+    "llvm.hexagon.V6.vsubhsat.dv" => "__builtin_HEXAGON_V6_vsubhsat_dv",
+    "llvm.hexagon.V6.vsubhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubhsat_dv_128B",
+    "llvm.hexagon.V6.vsubhw" => "__builtin_HEXAGON_V6_vsubhw",
+    "llvm.hexagon.V6.vsubhw.128B" => "__builtin_HEXAGON_V6_vsubhw_128B",
+    "llvm.hexagon.V6.vsububh" => "__builtin_HEXAGON_V6_vsububh",
+    "llvm.hexagon.V6.vsububh.128B" => "__builtin_HEXAGON_V6_vsububh_128B",
+    "llvm.hexagon.V6.vsububsat" => "__builtin_HEXAGON_V6_vsububsat",
+    "llvm.hexagon.V6.vsububsat.128B" => "__builtin_HEXAGON_V6_vsububsat_128B",
+    "llvm.hexagon.V6.vsububsat.dv" => "__builtin_HEXAGON_V6_vsububsat_dv",
+    "llvm.hexagon.V6.vsububsat.dv.128B" => "__builtin_HEXAGON_V6_vsububsat_dv_128B",
+    "llvm.hexagon.V6.vsubububb.sat" => "__builtin_HEXAGON_V6_vsubububb_sat",
+    "llvm.hexagon.V6.vsubububb.sat.128B" => "__builtin_HEXAGON_V6_vsubububb_sat_128B",
+    "llvm.hexagon.V6.vsubuhsat" => "__builtin_HEXAGON_V6_vsubuhsat",
+    "llvm.hexagon.V6.vsubuhsat.128B" => "__builtin_HEXAGON_V6_vsubuhsat_128B",
+    "llvm.hexagon.V6.vsubuhsat.dv" => "__builtin_HEXAGON_V6_vsubuhsat_dv",
+    "llvm.hexagon.V6.vsubuhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuhsat_dv_128B",
+    "llvm.hexagon.V6.vsubuhw" => "__builtin_HEXAGON_V6_vsubuhw",
+    "llvm.hexagon.V6.vsubuhw.128B" => "__builtin_HEXAGON_V6_vsubuhw_128B",
+    "llvm.hexagon.V6.vsubuwsat" => "__builtin_HEXAGON_V6_vsubuwsat",
+    "llvm.hexagon.V6.vsubuwsat.128B" => "__builtin_HEXAGON_V6_vsubuwsat_128B",
+    "llvm.hexagon.V6.vsubuwsat.dv" => "__builtin_HEXAGON_V6_vsubuwsat_dv",
+    "llvm.hexagon.V6.vsubuwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuwsat_dv_128B",
+    "llvm.hexagon.V6.vsubw" => "__builtin_HEXAGON_V6_vsubw",
+    "llvm.hexagon.V6.vsubw.128B" => "__builtin_HEXAGON_V6_vsubw_128B",
+    "llvm.hexagon.V6.vsubw.dv" => "__builtin_HEXAGON_V6_vsubw_dv",
+    "llvm.hexagon.V6.vsubw.dv.128B" => "__builtin_HEXAGON_V6_vsubw_dv_128B",
+    "llvm.hexagon.V6.vsubwnq" => "__builtin_HEXAGON_V6_vsubwnq",
+    "llvm.hexagon.V6.vsubwnq.128B" => "__builtin_HEXAGON_V6_vsubwnq_128B",
+    "llvm.hexagon.V6.vsubwq" => "__builtin_HEXAGON_V6_vsubwq",
+    "llvm.hexagon.V6.vsubwq.128B" => "__builtin_HEXAGON_V6_vsubwq_128B",
+    "llvm.hexagon.V6.vsubwsat" => "__builtin_HEXAGON_V6_vsubwsat",
+    "llvm.hexagon.V6.vsubwsat.128B" => "__builtin_HEXAGON_V6_vsubwsat_128B",
+    "llvm.hexagon.V6.vsubwsat.dv" => "__builtin_HEXAGON_V6_vsubwsat_dv",
+    "llvm.hexagon.V6.vsubwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubwsat_dv_128B",
+    "llvm.hexagon.V6.vswap" => "__builtin_HEXAGON_V6_vswap",
+    "llvm.hexagon.V6.vswap.128B" => "__builtin_HEXAGON_V6_vswap_128B",
+    "llvm.hexagon.V6.vtmpyb" => "__builtin_HEXAGON_V6_vtmpyb",
+    "llvm.hexagon.V6.vtmpyb.128B" => "__builtin_HEXAGON_V6_vtmpyb_128B",
+    "llvm.hexagon.V6.vtmpyb.acc" => "__builtin_HEXAGON_V6_vtmpyb_acc",
+    "llvm.hexagon.V6.vtmpyb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyb_acc_128B",
+    "llvm.hexagon.V6.vtmpybus" => "__builtin_HEXAGON_V6_vtmpybus",
+    "llvm.hexagon.V6.vtmpybus.128B" => "__builtin_HEXAGON_V6_vtmpybus_128B",
+    "llvm.hexagon.V6.vtmpybus.acc" => "__builtin_HEXAGON_V6_vtmpybus_acc",
+    "llvm.hexagon.V6.vtmpybus.acc.128B" => "__builtin_HEXAGON_V6_vtmpybus_acc_128B",
+    "llvm.hexagon.V6.vtmpyhb" => "__builtin_HEXAGON_V6_vtmpyhb",
+    "llvm.hexagon.V6.vtmpyhb.128B" => "__builtin_HEXAGON_V6_vtmpyhb_128B",
+    "llvm.hexagon.V6.vtmpyhb.acc" => "__builtin_HEXAGON_V6_vtmpyhb_acc",
+    "llvm.hexagon.V6.vtmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyhb_acc_128B",
+    "llvm.hexagon.V6.vunpackb" => "__builtin_HEXAGON_V6_vunpackb",
+    "llvm.hexagon.V6.vunpackb.128B" => "__builtin_HEXAGON_V6_vunpackb_128B",
+    "llvm.hexagon.V6.vunpackh" => "__builtin_HEXAGON_V6_vunpackh",
+    "llvm.hexagon.V6.vunpackh.128B" => "__builtin_HEXAGON_V6_vunpackh_128B",
+    "llvm.hexagon.V6.vunpackob" => "__builtin_HEXAGON_V6_vunpackob",
+    "llvm.hexagon.V6.vunpackob.128B" => "__builtin_HEXAGON_V6_vunpackob_128B",
+    "llvm.hexagon.V6.vunpackoh" => "__builtin_HEXAGON_V6_vunpackoh",
+    "llvm.hexagon.V6.vunpackoh.128B" => "__builtin_HEXAGON_V6_vunpackoh_128B",
+    "llvm.hexagon.V6.vunpackub" => "__builtin_HEXAGON_V6_vunpackub",
+    "llvm.hexagon.V6.vunpackub.128B" => "__builtin_HEXAGON_V6_vunpackub_128B",
+    "llvm.hexagon.V6.vunpackuh" => "__builtin_HEXAGON_V6_vunpackuh",
+    "llvm.hexagon.V6.vunpackuh.128B" => "__builtin_HEXAGON_V6_vunpackuh_128B",
+    "llvm.hexagon.V6.vxor" => "__builtin_HEXAGON_V6_vxor",
+    "llvm.hexagon.V6.vxor.128B" => "__builtin_HEXAGON_V6_vxor_128B",
+    "llvm.hexagon.V6.vzb" => "__builtin_HEXAGON_V6_vzb",
+    "llvm.hexagon.V6.vzb.128B" => "__builtin_HEXAGON_V6_vzb_128B",
+    "llvm.hexagon.V6.vzh" => "__builtin_HEXAGON_V6_vzh",
+    "llvm.hexagon.V6.vzh.128B" => "__builtin_HEXAGON_V6_vzh_128B",
+    "llvm.hexagon.Y2.dccleana" => "__builtin_HEXAGON_Y2_dccleana",
+    "llvm.hexagon.Y2.dccleaninva" => "__builtin_HEXAGON_Y2_dccleaninva",
+    "llvm.hexagon.Y2.dcfetch" => "__builtin_HEXAGON_Y2_dcfetch",
+    "llvm.hexagon.Y2.dcinva" => "__builtin_HEXAGON_Y2_dcinva",
+    "llvm.hexagon.Y2.dczeroa" => "__builtin_HEXAGON_Y2_dczeroa",
+    "llvm.hexagon.Y4.l2fetch" => "__builtin_HEXAGON_Y4_l2fetch",
+    "llvm.hexagon.Y5.l2fetch" => "__builtin_HEXAGON_Y5_l2fetch",
+    "llvm.hexagon.Y6.dmlink" => "__builtin_HEXAGON_Y6_dmlink",
+    "llvm.hexagon.Y6.dmpause" => "__builtin_HEXAGON_Y6_dmpause",
+    "llvm.hexagon.Y6.dmpoll" => "__builtin_HEXAGON_Y6_dmpoll",
+    "llvm.hexagon.Y6.dmresume" => "__builtin_HEXAGON_Y6_dmresume",
+    "llvm.hexagon.Y6.dmstart" => "__builtin_HEXAGON_Y6_dmstart",
+    "llvm.hexagon.Y6.dmwait" => "__builtin_HEXAGON_Y6_dmwait",
+    "llvm.hexagon.brev.ldb" => "__builtin_brev_ldb",
+    "llvm.hexagon.brev.ldd" => "__builtin_brev_ldd",
+    "llvm.hexagon.brev.ldh" => "__builtin_brev_ldh",
+    "llvm.hexagon.brev.ldub" => "__builtin_brev_ldub",
+    "llvm.hexagon.brev.lduh" => "__builtin_brev_lduh",
+    "llvm.hexagon.brev.ldw" => "__builtin_brev_ldw",
+    "llvm.hexagon.brev.stb" => "__builtin_brev_stb",
+    "llvm.hexagon.brev.std" => "__builtin_brev_std",
+    "llvm.hexagon.brev.sth" => "__builtin_brev_sth",
+    "llvm.hexagon.brev.sthhi" => "__builtin_brev_sthhi",
+    "llvm.hexagon.brev.stw" => "__builtin_brev_stw",
+    "llvm.hexagon.circ.ldb" => "__builtin_circ_ldb",
+    "llvm.hexagon.circ.ldd" => "__builtin_circ_ldd",
+    "llvm.hexagon.circ.ldh" => "__builtin_circ_ldh",
+    "llvm.hexagon.circ.ldub" => "__builtin_circ_ldub",
+    "llvm.hexagon.circ.lduh" => "__builtin_circ_lduh",
+    "llvm.hexagon.circ.ldw" => "__builtin_circ_ldw",
+    "llvm.hexagon.circ.stb" => "__builtin_circ_stb",
+    "llvm.hexagon.circ.std" => "__builtin_circ_std",
+    "llvm.hexagon.circ.sth" => "__builtin_circ_sth",
+    "llvm.hexagon.circ.sthhi" => "__builtin_circ_sthhi",
+    "llvm.hexagon.circ.stw" => "__builtin_circ_stw",
+    "llvm.hexagon.mm256i.vaddw" => "__builtin__mm256i_vaddw",
+    "llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch",
+    "llvm.hexagon.vmemcpy" => "__builtin_hexagon_vmemcpy",
+    "llvm.hexagon.vmemset" => "__builtin_hexagon_vmemset",
+    // loongarch
+    "llvm.loongarch.asrtgt.d" => "__builtin_loongarch_asrtgt_d",
+    "llvm.loongarch.asrtle.d" => "__builtin_loongarch_asrtle_d",
+    "llvm.loongarch.break" => "__builtin_loongarch_break",
+    "llvm.loongarch.cacop.d" => "__builtin_loongarch_cacop_d",
+    "llvm.loongarch.cacop.w" => "__builtin_loongarch_cacop_w",
+    "llvm.loongarch.cpucfg" => "__builtin_loongarch_cpucfg",
+    "llvm.loongarch.crc.w.b.w" => "__builtin_loongarch_crc_w_b_w",
+    "llvm.loongarch.crc.w.d.w" => "__builtin_loongarch_crc_w_d_w",
+    "llvm.loongarch.crc.w.h.w" => "__builtin_loongarch_crc_w_h_w",
+    "llvm.loongarch.crc.w.w.w" => "__builtin_loongarch_crc_w_w_w",
+    "llvm.loongarch.crcc.w.b.w" => "__builtin_loongarch_crcc_w_b_w",
+    "llvm.loongarch.crcc.w.d.w" => "__builtin_loongarch_crcc_w_d_w",
+    "llvm.loongarch.crcc.w.h.w" => "__builtin_loongarch_crcc_w_h_w",
+    "llvm.loongarch.crcc.w.w.w" => "__builtin_loongarch_crcc_w_w_w",
+    "llvm.loongarch.csrrd.d" => "__builtin_loongarch_csrrd_d",
+    "llvm.loongarch.csrrd.w" => "__builtin_loongarch_csrrd_w",
+    "llvm.loongarch.csrwr.d" => "__builtin_loongarch_csrwr_d",
+    "llvm.loongarch.csrwr.w" => "__builtin_loongarch_csrwr_w",
+    "llvm.loongarch.csrxchg.d" => "__builtin_loongarch_csrxchg_d",
+    "llvm.loongarch.csrxchg.w" => "__builtin_loongarch_csrxchg_w",
+    "llvm.loongarch.dbar" => "__builtin_loongarch_dbar",
+    "llvm.loongarch.frecipe.d" => "__builtin_loongarch_frecipe_d",
+    "llvm.loongarch.frecipe.s" => "__builtin_loongarch_frecipe_s",
+    "llvm.loongarch.frsqrte.d" => "__builtin_loongarch_frsqrte_d",
+    "llvm.loongarch.frsqrte.s" => "__builtin_loongarch_frsqrte_s",
+    "llvm.loongarch.ibar" => "__builtin_loongarch_ibar",
+    "llvm.loongarch.iocsrrd.b" => "__builtin_loongarch_iocsrrd_b",
+    "llvm.loongarch.iocsrrd.d" => "__builtin_loongarch_iocsrrd_d",
+    "llvm.loongarch.iocsrrd.h" => "__builtin_loongarch_iocsrrd_h",
+    "llvm.loongarch.iocsrrd.w" => "__builtin_loongarch_iocsrrd_w",
+    "llvm.loongarch.iocsrwr.b" => "__builtin_loongarch_iocsrwr_b",
+    "llvm.loongarch.iocsrwr.d" => "__builtin_loongarch_iocsrwr_d",
+    "llvm.loongarch.iocsrwr.h" => "__builtin_loongarch_iocsrwr_h",
+    "llvm.loongarch.iocsrwr.w" => "__builtin_loongarch_iocsrwr_w",
+    "llvm.loongarch.lasx.vext2xv.d.b" => "__builtin_lasx_vext2xv_d_b",
+    "llvm.loongarch.lasx.vext2xv.d.h" => "__builtin_lasx_vext2xv_d_h",
+    "llvm.loongarch.lasx.vext2xv.d.w" => "__builtin_lasx_vext2xv_d_w",
+    "llvm.loongarch.lasx.vext2xv.du.bu" => "__builtin_lasx_vext2xv_du_bu",
+    "llvm.loongarch.lasx.vext2xv.du.hu" => "__builtin_lasx_vext2xv_du_hu",
+    "llvm.loongarch.lasx.vext2xv.du.wu" => "__builtin_lasx_vext2xv_du_wu",
+    "llvm.loongarch.lasx.vext2xv.h.b" => "__builtin_lasx_vext2xv_h_b",
+    "llvm.loongarch.lasx.vext2xv.hu.bu" => "__builtin_lasx_vext2xv_hu_bu",
+    "llvm.loongarch.lasx.vext2xv.w.b" => "__builtin_lasx_vext2xv_w_b",
+    "llvm.loongarch.lasx.vext2xv.w.h" => "__builtin_lasx_vext2xv_w_h",
+    "llvm.loongarch.lasx.vext2xv.wu.bu" => "__builtin_lasx_vext2xv_wu_bu",
+    "llvm.loongarch.lasx.vext2xv.wu.hu" => "__builtin_lasx_vext2xv_wu_hu",
+    "llvm.loongarch.lasx.xbnz.b" => "__builtin_lasx_xbnz_b",
+    "llvm.loongarch.lasx.xbnz.d" => "__builtin_lasx_xbnz_d",
+    "llvm.loongarch.lasx.xbnz.h" => "__builtin_lasx_xbnz_h",
+    "llvm.loongarch.lasx.xbnz.v" => "__builtin_lasx_xbnz_v",
+    "llvm.loongarch.lasx.xbnz.w" => "__builtin_lasx_xbnz_w",
+    "llvm.loongarch.lasx.xbz.b" => "__builtin_lasx_xbz_b",
+    "llvm.loongarch.lasx.xbz.d" => "__builtin_lasx_xbz_d",
+    "llvm.loongarch.lasx.xbz.h" => "__builtin_lasx_xbz_h",
+    "llvm.loongarch.lasx.xbz.v" => "__builtin_lasx_xbz_v",
+    "llvm.loongarch.lasx.xbz.w" => "__builtin_lasx_xbz_w",
+    "llvm.loongarch.lasx.xvabsd.b" => "__builtin_lasx_xvabsd_b",
+    "llvm.loongarch.lasx.xvabsd.bu" => "__builtin_lasx_xvabsd_bu",
+    "llvm.loongarch.lasx.xvabsd.d" => "__builtin_lasx_xvabsd_d",
+    "llvm.loongarch.lasx.xvabsd.du" => "__builtin_lasx_xvabsd_du",
+    "llvm.loongarch.lasx.xvabsd.h" => "__builtin_lasx_xvabsd_h",
+    "llvm.loongarch.lasx.xvabsd.hu" => "__builtin_lasx_xvabsd_hu",
+    "llvm.loongarch.lasx.xvabsd.w" => "__builtin_lasx_xvabsd_w",
+    "llvm.loongarch.lasx.xvabsd.wu" => "__builtin_lasx_xvabsd_wu",
+    "llvm.loongarch.lasx.xvadd.b" => "__builtin_lasx_xvadd_b",
+    "llvm.loongarch.lasx.xvadd.d" => "__builtin_lasx_xvadd_d",
+    "llvm.loongarch.lasx.xvadd.h" => "__builtin_lasx_xvadd_h",
+    "llvm.loongarch.lasx.xvadd.q" => "__builtin_lasx_xvadd_q",
+    "llvm.loongarch.lasx.xvadd.w" => "__builtin_lasx_xvadd_w",
+    "llvm.loongarch.lasx.xvadda.b" => "__builtin_lasx_xvadda_b",
+    "llvm.loongarch.lasx.xvadda.d" => "__builtin_lasx_xvadda_d",
+    "llvm.loongarch.lasx.xvadda.h" => "__builtin_lasx_xvadda_h",
+    "llvm.loongarch.lasx.xvadda.w" => "__builtin_lasx_xvadda_w",
+    "llvm.loongarch.lasx.xvaddi.bu" => "__builtin_lasx_xvaddi_bu",
+    "llvm.loongarch.lasx.xvaddi.du" => "__builtin_lasx_xvaddi_du",
+    "llvm.loongarch.lasx.xvaddi.hu" => "__builtin_lasx_xvaddi_hu",
+    "llvm.loongarch.lasx.xvaddi.wu" => "__builtin_lasx_xvaddi_wu",
+    "llvm.loongarch.lasx.xvaddwev.d.w" => "__builtin_lasx_xvaddwev_d_w",
+    "llvm.loongarch.lasx.xvaddwev.d.wu" => "__builtin_lasx_xvaddwev_d_wu",
+    "llvm.loongarch.lasx.xvaddwev.d.wu.w" => "__builtin_lasx_xvaddwev_d_wu_w",
+    "llvm.loongarch.lasx.xvaddwev.h.b" => "__builtin_lasx_xvaddwev_h_b",
+    "llvm.loongarch.lasx.xvaddwev.h.bu" => "__builtin_lasx_xvaddwev_h_bu",
+    "llvm.loongarch.lasx.xvaddwev.h.bu.b" => "__builtin_lasx_xvaddwev_h_bu_b",
+    "llvm.loongarch.lasx.xvaddwev.q.d" => "__builtin_lasx_xvaddwev_q_d",
+    "llvm.loongarch.lasx.xvaddwev.q.du" => "__builtin_lasx_xvaddwev_q_du",
+    "llvm.loongarch.lasx.xvaddwev.q.du.d" => "__builtin_lasx_xvaddwev_q_du_d",
+    "llvm.loongarch.lasx.xvaddwev.w.h" => "__builtin_lasx_xvaddwev_w_h",
+    "llvm.loongarch.lasx.xvaddwev.w.hu" => "__builtin_lasx_xvaddwev_w_hu",
+    "llvm.loongarch.lasx.xvaddwev.w.hu.h" => "__builtin_lasx_xvaddwev_w_hu_h",
+    "llvm.loongarch.lasx.xvaddwod.d.w" => "__builtin_lasx_xvaddwod_d_w",
+    "llvm.loongarch.lasx.xvaddwod.d.wu" => "__builtin_lasx_xvaddwod_d_wu",
+    "llvm.loongarch.lasx.xvaddwod.d.wu.w" => "__builtin_lasx_xvaddwod_d_wu_w",
+    "llvm.loongarch.lasx.xvaddwod.h.b" => "__builtin_lasx_xvaddwod_h_b",
+    "llvm.loongarch.lasx.xvaddwod.h.bu" => "__builtin_lasx_xvaddwod_h_bu",
+    "llvm.loongarch.lasx.xvaddwod.h.bu.b" => "__builtin_lasx_xvaddwod_h_bu_b",
+    "llvm.loongarch.lasx.xvaddwod.q.d" => "__builtin_lasx_xvaddwod_q_d",
+    "llvm.loongarch.lasx.xvaddwod.q.du" => "__builtin_lasx_xvaddwod_q_du",
+    "llvm.loongarch.lasx.xvaddwod.q.du.d" => "__builtin_lasx_xvaddwod_q_du_d",
+    "llvm.loongarch.lasx.xvaddwod.w.h" => "__builtin_lasx_xvaddwod_w_h",
+    "llvm.loongarch.lasx.xvaddwod.w.hu" => "__builtin_lasx_xvaddwod_w_hu",
+    "llvm.loongarch.lasx.xvaddwod.w.hu.h" => "__builtin_lasx_xvaddwod_w_hu_h",
+    "llvm.loongarch.lasx.xvand.v" => "__builtin_lasx_xvand_v",
+    "llvm.loongarch.lasx.xvandi.b" => "__builtin_lasx_xvandi_b",
+    "llvm.loongarch.lasx.xvandn.v" => "__builtin_lasx_xvandn_v",
+    "llvm.loongarch.lasx.xvavg.b" => "__builtin_lasx_xvavg_b",
+    "llvm.loongarch.lasx.xvavg.bu" => "__builtin_lasx_xvavg_bu",
+    "llvm.loongarch.lasx.xvavg.d" => "__builtin_lasx_xvavg_d",
+    "llvm.loongarch.lasx.xvavg.du" => "__builtin_lasx_xvavg_du",
+    "llvm.loongarch.lasx.xvavg.h" => "__builtin_lasx_xvavg_h",
+    "llvm.loongarch.lasx.xvavg.hu" => "__builtin_lasx_xvavg_hu",
+    "llvm.loongarch.lasx.xvavg.w" => "__builtin_lasx_xvavg_w",
+    "llvm.loongarch.lasx.xvavg.wu" => "__builtin_lasx_xvavg_wu",
+    "llvm.loongarch.lasx.xvavgr.b" => "__builtin_lasx_xvavgr_b",
+    "llvm.loongarch.lasx.xvavgr.bu" => "__builtin_lasx_xvavgr_bu",
+    "llvm.loongarch.lasx.xvavgr.d" => "__builtin_lasx_xvavgr_d",
+    "llvm.loongarch.lasx.xvavgr.du" => "__builtin_lasx_xvavgr_du",
+    "llvm.loongarch.lasx.xvavgr.h" => "__builtin_lasx_xvavgr_h",
+    "llvm.loongarch.lasx.xvavgr.hu" => "__builtin_lasx_xvavgr_hu",
+    "llvm.loongarch.lasx.xvavgr.w" => "__builtin_lasx_xvavgr_w",
+    "llvm.loongarch.lasx.xvavgr.wu" => "__builtin_lasx_xvavgr_wu",
+    "llvm.loongarch.lasx.xvbitclr.b" => "__builtin_lasx_xvbitclr_b",
+    "llvm.loongarch.lasx.xvbitclr.d" => "__builtin_lasx_xvbitclr_d",
+    "llvm.loongarch.lasx.xvbitclr.h" => "__builtin_lasx_xvbitclr_h",
+    "llvm.loongarch.lasx.xvbitclr.w" => "__builtin_lasx_xvbitclr_w",
+    "llvm.loongarch.lasx.xvbitclri.b" => "__builtin_lasx_xvbitclri_b",
+    "llvm.loongarch.lasx.xvbitclri.d" => "__builtin_lasx_xvbitclri_d",
+    "llvm.loongarch.lasx.xvbitclri.h" => "__builtin_lasx_xvbitclri_h",
+    "llvm.loongarch.lasx.xvbitclri.w" => "__builtin_lasx_xvbitclri_w",
+    "llvm.loongarch.lasx.xvbitrev.b" => "__builtin_lasx_xvbitrev_b",
+    "llvm.loongarch.lasx.xvbitrev.d" => "__builtin_lasx_xvbitrev_d",
+    "llvm.loongarch.lasx.xvbitrev.h" => "__builtin_lasx_xvbitrev_h",
+    "llvm.loongarch.lasx.xvbitrev.w" => "__builtin_lasx_xvbitrev_w",
+    "llvm.loongarch.lasx.xvbitrevi.b" => "__builtin_lasx_xvbitrevi_b",
+    "llvm.loongarch.lasx.xvbitrevi.d" => "__builtin_lasx_xvbitrevi_d",
+    "llvm.loongarch.lasx.xvbitrevi.h" => "__builtin_lasx_xvbitrevi_h",
+    "llvm.loongarch.lasx.xvbitrevi.w" => "__builtin_lasx_xvbitrevi_w",
+    "llvm.loongarch.lasx.xvbitsel.v" => "__builtin_lasx_xvbitsel_v",
+    "llvm.loongarch.lasx.xvbitseli.b" => "__builtin_lasx_xvbitseli_b",
+    "llvm.loongarch.lasx.xvbitset.b" => "__builtin_lasx_xvbitset_b",
+    "llvm.loongarch.lasx.xvbitset.d" => "__builtin_lasx_xvbitset_d",
+    "llvm.loongarch.lasx.xvbitset.h" => "__builtin_lasx_xvbitset_h",
+    "llvm.loongarch.lasx.xvbitset.w" => "__builtin_lasx_xvbitset_w",
+    "llvm.loongarch.lasx.xvbitseti.b" => "__builtin_lasx_xvbitseti_b",
+    "llvm.loongarch.lasx.xvbitseti.d" => "__builtin_lasx_xvbitseti_d",
+    "llvm.loongarch.lasx.xvbitseti.h" => "__builtin_lasx_xvbitseti_h",
+    "llvm.loongarch.lasx.xvbitseti.w" => "__builtin_lasx_xvbitseti_w",
+    "llvm.loongarch.lasx.xvbsll.v" => "__builtin_lasx_xvbsll_v",
+    "llvm.loongarch.lasx.xvbsrl.v" => "__builtin_lasx_xvbsrl_v",
+    "llvm.loongarch.lasx.xvclo.b" => "__builtin_lasx_xvclo_b",
+    "llvm.loongarch.lasx.xvclo.d" => "__builtin_lasx_xvclo_d",
+    "llvm.loongarch.lasx.xvclo.h" => "__builtin_lasx_xvclo_h",
+    "llvm.loongarch.lasx.xvclo.w" => "__builtin_lasx_xvclo_w",
+    "llvm.loongarch.lasx.xvclz.b" => "__builtin_lasx_xvclz_b",
+    "llvm.loongarch.lasx.xvclz.d" => "__builtin_lasx_xvclz_d",
+    "llvm.loongarch.lasx.xvclz.h" => "__builtin_lasx_xvclz_h",
+    "llvm.loongarch.lasx.xvclz.w" => "__builtin_lasx_xvclz_w",
+    "llvm.loongarch.lasx.xvdiv.b" => "__builtin_lasx_xvdiv_b",
+    "llvm.loongarch.lasx.xvdiv.bu" => "__builtin_lasx_xvdiv_bu",
+    "llvm.loongarch.lasx.xvdiv.d" => "__builtin_lasx_xvdiv_d",
+    "llvm.loongarch.lasx.xvdiv.du" => "__builtin_lasx_xvdiv_du",
+    "llvm.loongarch.lasx.xvdiv.h" => "__builtin_lasx_xvdiv_h",
+    "llvm.loongarch.lasx.xvdiv.hu" => "__builtin_lasx_xvdiv_hu",
+    "llvm.loongarch.lasx.xvdiv.w" => "__builtin_lasx_xvdiv_w",
+    "llvm.loongarch.lasx.xvdiv.wu" => "__builtin_lasx_xvdiv_wu",
+    "llvm.loongarch.lasx.xvexth.d.w" => "__builtin_lasx_xvexth_d_w",
+    "llvm.loongarch.lasx.xvexth.du.wu" => "__builtin_lasx_xvexth_du_wu",
+    "llvm.loongarch.lasx.xvexth.h.b" => "__builtin_lasx_xvexth_h_b",
+    "llvm.loongarch.lasx.xvexth.hu.bu" => "__builtin_lasx_xvexth_hu_bu",
+    "llvm.loongarch.lasx.xvexth.q.d" => "__builtin_lasx_xvexth_q_d",
+    "llvm.loongarch.lasx.xvexth.qu.du" => "__builtin_lasx_xvexth_qu_du",
+    "llvm.loongarch.lasx.xvexth.w.h" => "__builtin_lasx_xvexth_w_h",
+    "llvm.loongarch.lasx.xvexth.wu.hu" => "__builtin_lasx_xvexth_wu_hu",
+    "llvm.loongarch.lasx.xvextl.q.d" => "__builtin_lasx_xvextl_q_d",
+    "llvm.loongarch.lasx.xvextl.qu.du" => "__builtin_lasx_xvextl_qu_du",
+    "llvm.loongarch.lasx.xvextrins.b" => "__builtin_lasx_xvextrins_b",
+    "llvm.loongarch.lasx.xvextrins.d" => "__builtin_lasx_xvextrins_d",
+    "llvm.loongarch.lasx.xvextrins.h" => "__builtin_lasx_xvextrins_h",
+    "llvm.loongarch.lasx.xvextrins.w" => "__builtin_lasx_xvextrins_w",
+    "llvm.loongarch.lasx.xvfadd.d" => "__builtin_lasx_xvfadd_d",
+    "llvm.loongarch.lasx.xvfadd.s" => "__builtin_lasx_xvfadd_s",
+    "llvm.loongarch.lasx.xvfclass.d" => "__builtin_lasx_xvfclass_d",
+    "llvm.loongarch.lasx.xvfclass.s" => "__builtin_lasx_xvfclass_s",
+    "llvm.loongarch.lasx.xvfcmp.caf.d" => "__builtin_lasx_xvfcmp_caf_d",
+    "llvm.loongarch.lasx.xvfcmp.caf.s" => "__builtin_lasx_xvfcmp_caf_s",
+    "llvm.loongarch.lasx.xvfcmp.ceq.d" => "__builtin_lasx_xvfcmp_ceq_d",
+    "llvm.loongarch.lasx.xvfcmp.ceq.s" => "__builtin_lasx_xvfcmp_ceq_s",
+    "llvm.loongarch.lasx.xvfcmp.cle.d" => "__builtin_lasx_xvfcmp_cle_d",
+    "llvm.loongarch.lasx.xvfcmp.cle.s" => "__builtin_lasx_xvfcmp_cle_s",
+    "llvm.loongarch.lasx.xvfcmp.clt.d" => "__builtin_lasx_xvfcmp_clt_d",
+    "llvm.loongarch.lasx.xvfcmp.clt.s" => "__builtin_lasx_xvfcmp_clt_s",
+    "llvm.loongarch.lasx.xvfcmp.cne.d" => "__builtin_lasx_xvfcmp_cne_d",
+    "llvm.loongarch.lasx.xvfcmp.cne.s" => "__builtin_lasx_xvfcmp_cne_s",
+    "llvm.loongarch.lasx.xvfcmp.cor.d" => "__builtin_lasx_xvfcmp_cor_d",
+    "llvm.loongarch.lasx.xvfcmp.cor.s" => "__builtin_lasx_xvfcmp_cor_s",
+    "llvm.loongarch.lasx.xvfcmp.cueq.d" => "__builtin_lasx_xvfcmp_cueq_d",
+    "llvm.loongarch.lasx.xvfcmp.cueq.s" => "__builtin_lasx_xvfcmp_cueq_s",
+    "llvm.loongarch.lasx.xvfcmp.cule.d" => "__builtin_lasx_xvfcmp_cule_d",
+    "llvm.loongarch.lasx.xvfcmp.cule.s" => "__builtin_lasx_xvfcmp_cule_s",
+    "llvm.loongarch.lasx.xvfcmp.cult.d" => "__builtin_lasx_xvfcmp_cult_d",
+    "llvm.loongarch.lasx.xvfcmp.cult.s" => "__builtin_lasx_xvfcmp_cult_s",
+    "llvm.loongarch.lasx.xvfcmp.cun.d" => "__builtin_lasx_xvfcmp_cun_d",
+    "llvm.loongarch.lasx.xvfcmp.cun.s" => "__builtin_lasx_xvfcmp_cun_s",
+    "llvm.loongarch.lasx.xvfcmp.cune.d" => "__builtin_lasx_xvfcmp_cune_d",
+    "llvm.loongarch.lasx.xvfcmp.cune.s" => "__builtin_lasx_xvfcmp_cune_s",
+    "llvm.loongarch.lasx.xvfcmp.saf.d" => "__builtin_lasx_xvfcmp_saf_d",
+    "llvm.loongarch.lasx.xvfcmp.saf.s" => "__builtin_lasx_xvfcmp_saf_s",
+    "llvm.loongarch.lasx.xvfcmp.seq.d" => "__builtin_lasx_xvfcmp_seq_d",
+    "llvm.loongarch.lasx.xvfcmp.seq.s" => "__builtin_lasx_xvfcmp_seq_s",
+    "llvm.loongarch.lasx.xvfcmp.sle.d" => "__builtin_lasx_xvfcmp_sle_d",
+    "llvm.loongarch.lasx.xvfcmp.sle.s" => "__builtin_lasx_xvfcmp_sle_s",
+    "llvm.loongarch.lasx.xvfcmp.slt.d" => "__builtin_lasx_xvfcmp_slt_d",
+    "llvm.loongarch.lasx.xvfcmp.slt.s" => "__builtin_lasx_xvfcmp_slt_s",
+    "llvm.loongarch.lasx.xvfcmp.sne.d" => "__builtin_lasx_xvfcmp_sne_d",
+    "llvm.loongarch.lasx.xvfcmp.sne.s" => "__builtin_lasx_xvfcmp_sne_s",
+    "llvm.loongarch.lasx.xvfcmp.sor.d" => "__builtin_lasx_xvfcmp_sor_d",
+    "llvm.loongarch.lasx.xvfcmp.sor.s" => "__builtin_lasx_xvfcmp_sor_s",
+    "llvm.loongarch.lasx.xvfcmp.sueq.d" => "__builtin_lasx_xvfcmp_sueq_d",
+    "llvm.loongarch.lasx.xvfcmp.sueq.s" => "__builtin_lasx_xvfcmp_sueq_s",
+    "llvm.loongarch.lasx.xvfcmp.sule.d" => "__builtin_lasx_xvfcmp_sule_d",
+    "llvm.loongarch.lasx.xvfcmp.sule.s" => "__builtin_lasx_xvfcmp_sule_s",
+    "llvm.loongarch.lasx.xvfcmp.sult.d" => "__builtin_lasx_xvfcmp_sult_d",
+    "llvm.loongarch.lasx.xvfcmp.sult.s" => "__builtin_lasx_xvfcmp_sult_s",
+    "llvm.loongarch.lasx.xvfcmp.sun.d" => "__builtin_lasx_xvfcmp_sun_d",
+    "llvm.loongarch.lasx.xvfcmp.sun.s" => "__builtin_lasx_xvfcmp_sun_s",
+    "llvm.loongarch.lasx.xvfcmp.sune.d" => "__builtin_lasx_xvfcmp_sune_d",
+    "llvm.loongarch.lasx.xvfcmp.sune.s" => "__builtin_lasx_xvfcmp_sune_s",
+    "llvm.loongarch.lasx.xvfcvt.h.s" => "__builtin_lasx_xvfcvt_h_s",
+    "llvm.loongarch.lasx.xvfcvt.s.d" => "__builtin_lasx_xvfcvt_s_d",
+    "llvm.loongarch.lasx.xvfcvth.d.s" => "__builtin_lasx_xvfcvth_d_s",
+    "llvm.loongarch.lasx.xvfcvth.s.h" => "__builtin_lasx_xvfcvth_s_h",
+    "llvm.loongarch.lasx.xvfcvtl.d.s" => "__builtin_lasx_xvfcvtl_d_s",
+    "llvm.loongarch.lasx.xvfcvtl.s.h" => "__builtin_lasx_xvfcvtl_s_h",
+    "llvm.loongarch.lasx.xvfdiv.d" => "__builtin_lasx_xvfdiv_d",
+    "llvm.loongarch.lasx.xvfdiv.s" => "__builtin_lasx_xvfdiv_s",
+    "llvm.loongarch.lasx.xvffint.d.l" => "__builtin_lasx_xvffint_d_l",
+    "llvm.loongarch.lasx.xvffint.d.lu" => "__builtin_lasx_xvffint_d_lu",
+    "llvm.loongarch.lasx.xvffint.s.l" => "__builtin_lasx_xvffint_s_l",
+    "llvm.loongarch.lasx.xvffint.s.w" => "__builtin_lasx_xvffint_s_w",
+    "llvm.loongarch.lasx.xvffint.s.wu" => "__builtin_lasx_xvffint_s_wu",
+    "llvm.loongarch.lasx.xvffinth.d.w" => "__builtin_lasx_xvffinth_d_w",
+    "llvm.loongarch.lasx.xvffintl.d.w" => "__builtin_lasx_xvffintl_d_w",
+    "llvm.loongarch.lasx.xvflogb.d" => "__builtin_lasx_xvflogb_d",
+    "llvm.loongarch.lasx.xvflogb.s" => "__builtin_lasx_xvflogb_s",
+    "llvm.loongarch.lasx.xvfmadd.d" => "__builtin_lasx_xvfmadd_d",
+    "llvm.loongarch.lasx.xvfmadd.s" => "__builtin_lasx_xvfmadd_s",
+    "llvm.loongarch.lasx.xvfmax.d" => "__builtin_lasx_xvfmax_d",
+    "llvm.loongarch.lasx.xvfmax.s" => "__builtin_lasx_xvfmax_s",
+    "llvm.loongarch.lasx.xvfmaxa.d" => "__builtin_lasx_xvfmaxa_d",
+    "llvm.loongarch.lasx.xvfmaxa.s" => "__builtin_lasx_xvfmaxa_s",
+    "llvm.loongarch.lasx.xvfmin.d" => "__builtin_lasx_xvfmin_d",
+    "llvm.loongarch.lasx.xvfmin.s" => "__builtin_lasx_xvfmin_s",
+    "llvm.loongarch.lasx.xvfmina.d" => "__builtin_lasx_xvfmina_d",
+    "llvm.loongarch.lasx.xvfmina.s" => "__builtin_lasx_xvfmina_s",
+    "llvm.loongarch.lasx.xvfmsub.d" => "__builtin_lasx_xvfmsub_d",
+    "llvm.loongarch.lasx.xvfmsub.s" => "__builtin_lasx_xvfmsub_s",
+    "llvm.loongarch.lasx.xvfmul.d" => "__builtin_lasx_xvfmul_d",
+    "llvm.loongarch.lasx.xvfmul.s" => "__builtin_lasx_xvfmul_s",
+    "llvm.loongarch.lasx.xvfnmadd.d" => "__builtin_lasx_xvfnmadd_d",
+    "llvm.loongarch.lasx.xvfnmadd.s" => "__builtin_lasx_xvfnmadd_s",
+    "llvm.loongarch.lasx.xvfnmsub.d" => "__builtin_lasx_xvfnmsub_d",
+    "llvm.loongarch.lasx.xvfnmsub.s" => "__builtin_lasx_xvfnmsub_s",
+    "llvm.loongarch.lasx.xvfrecip.d" => "__builtin_lasx_xvfrecip_d",
+    "llvm.loongarch.lasx.xvfrecip.s" => "__builtin_lasx_xvfrecip_s",
+    "llvm.loongarch.lasx.xvfrecipe.d" => "__builtin_lasx_xvfrecipe_d",
+    "llvm.loongarch.lasx.xvfrecipe.s" => "__builtin_lasx_xvfrecipe_s",
+    "llvm.loongarch.lasx.xvfrint.d" => "__builtin_lasx_xvfrint_d",
+    "llvm.loongarch.lasx.xvfrint.s" => "__builtin_lasx_xvfrint_s",
+    "llvm.loongarch.lasx.xvfrintrm.d" => "__builtin_lasx_xvfrintrm_d",
+    "llvm.loongarch.lasx.xvfrintrm.s" => "__builtin_lasx_xvfrintrm_s",
+    "llvm.loongarch.lasx.xvfrintrne.d" => "__builtin_lasx_xvfrintrne_d",
+    "llvm.loongarch.lasx.xvfrintrne.s" => "__builtin_lasx_xvfrintrne_s",
+    "llvm.loongarch.lasx.xvfrintrp.d" => "__builtin_lasx_xvfrintrp_d",
+    "llvm.loongarch.lasx.xvfrintrp.s" => "__builtin_lasx_xvfrintrp_s",
+    "llvm.loongarch.lasx.xvfrintrz.d" => "__builtin_lasx_xvfrintrz_d",
+    "llvm.loongarch.lasx.xvfrintrz.s" => "__builtin_lasx_xvfrintrz_s",
+    "llvm.loongarch.lasx.xvfrsqrt.d" => "__builtin_lasx_xvfrsqrt_d",
+    "llvm.loongarch.lasx.xvfrsqrt.s" => "__builtin_lasx_xvfrsqrt_s",
+    "llvm.loongarch.lasx.xvfrsqrte.d" => "__builtin_lasx_xvfrsqrte_d",
+    "llvm.loongarch.lasx.xvfrsqrte.s" => "__builtin_lasx_xvfrsqrte_s",
+    "llvm.loongarch.lasx.xvfrstp.b" => "__builtin_lasx_xvfrstp_b",
+    "llvm.loongarch.lasx.xvfrstp.h" => "__builtin_lasx_xvfrstp_h",
+    "llvm.loongarch.lasx.xvfrstpi.b" => "__builtin_lasx_xvfrstpi_b",
+    "llvm.loongarch.lasx.xvfrstpi.h" => "__builtin_lasx_xvfrstpi_h",
+    "llvm.loongarch.lasx.xvfsqrt.d" => "__builtin_lasx_xvfsqrt_d",
+    "llvm.loongarch.lasx.xvfsqrt.s" => "__builtin_lasx_xvfsqrt_s",
+    "llvm.loongarch.lasx.xvfsub.d" => "__builtin_lasx_xvfsub_d",
+    "llvm.loongarch.lasx.xvfsub.s" => "__builtin_lasx_xvfsub_s",
+    "llvm.loongarch.lasx.xvftint.l.d" => "__builtin_lasx_xvftint_l_d",
+    "llvm.loongarch.lasx.xvftint.lu.d" => "__builtin_lasx_xvftint_lu_d",
+    "llvm.loongarch.lasx.xvftint.w.d" => "__builtin_lasx_xvftint_w_d",
+    "llvm.loongarch.lasx.xvftint.w.s" => "__builtin_lasx_xvftint_w_s",
+    "llvm.loongarch.lasx.xvftint.wu.s" => "__builtin_lasx_xvftint_wu_s",
+    "llvm.loongarch.lasx.xvftinth.l.s" => "__builtin_lasx_xvftinth_l_s",
+    "llvm.loongarch.lasx.xvftintl.l.s" => "__builtin_lasx_xvftintl_l_s",
+    "llvm.loongarch.lasx.xvftintrm.l.d" => "__builtin_lasx_xvftintrm_l_d",
+    "llvm.loongarch.lasx.xvftintrm.w.d" => "__builtin_lasx_xvftintrm_w_d",
+    "llvm.loongarch.lasx.xvftintrm.w.s" => "__builtin_lasx_xvftintrm_w_s",
+    "llvm.loongarch.lasx.xvftintrmh.l.s" => "__builtin_lasx_xvftintrmh_l_s",
+    "llvm.loongarch.lasx.xvftintrml.l.s" => "__builtin_lasx_xvftintrml_l_s",
+    "llvm.loongarch.lasx.xvftintrne.l.d" => "__builtin_lasx_xvftintrne_l_d",
+    "llvm.loongarch.lasx.xvftintrne.w.d" => "__builtin_lasx_xvftintrne_w_d",
+    "llvm.loongarch.lasx.xvftintrne.w.s" => "__builtin_lasx_xvftintrne_w_s",
+    "llvm.loongarch.lasx.xvftintrneh.l.s" => "__builtin_lasx_xvftintrneh_l_s",
+    "llvm.loongarch.lasx.xvftintrnel.l.s" => "__builtin_lasx_xvftintrnel_l_s",
+    "llvm.loongarch.lasx.xvftintrp.l.d" => "__builtin_lasx_xvftintrp_l_d",
+    "llvm.loongarch.lasx.xvftintrp.w.d" => "__builtin_lasx_xvftintrp_w_d",
+    "llvm.loongarch.lasx.xvftintrp.w.s" => "__builtin_lasx_xvftintrp_w_s",
+    "llvm.loongarch.lasx.xvftintrph.l.s" => "__builtin_lasx_xvftintrph_l_s",
+    "llvm.loongarch.lasx.xvftintrpl.l.s" => "__builtin_lasx_xvftintrpl_l_s",
+    "llvm.loongarch.lasx.xvftintrz.l.d" => "__builtin_lasx_xvftintrz_l_d",
+    "llvm.loongarch.lasx.xvftintrz.lu.d" => "__builtin_lasx_xvftintrz_lu_d",
+    "llvm.loongarch.lasx.xvftintrz.w.d" => "__builtin_lasx_xvftintrz_w_d",
+    "llvm.loongarch.lasx.xvftintrz.w.s" => "__builtin_lasx_xvftintrz_w_s",
+    "llvm.loongarch.lasx.xvftintrz.wu.s" => "__builtin_lasx_xvftintrz_wu_s",
+    "llvm.loongarch.lasx.xvftintrzh.l.s" => "__builtin_lasx_xvftintrzh_l_s",
+    "llvm.loongarch.lasx.xvftintrzl.l.s" => "__builtin_lasx_xvftintrzl_l_s",
+    "llvm.loongarch.lasx.xvhaddw.d.w" => "__builtin_lasx_xvhaddw_d_w",
+    "llvm.loongarch.lasx.xvhaddw.du.wu" => "__builtin_lasx_xvhaddw_du_wu",
+    "llvm.loongarch.lasx.xvhaddw.h.b" => "__builtin_lasx_xvhaddw_h_b",
+    "llvm.loongarch.lasx.xvhaddw.hu.bu" => "__builtin_lasx_xvhaddw_hu_bu",
+    "llvm.loongarch.lasx.xvhaddw.q.d" => "__builtin_lasx_xvhaddw_q_d",
+    "llvm.loongarch.lasx.xvhaddw.qu.du" => "__builtin_lasx_xvhaddw_qu_du",
+    "llvm.loongarch.lasx.xvhaddw.w.h" => "__builtin_lasx_xvhaddw_w_h",
+    "llvm.loongarch.lasx.xvhaddw.wu.hu" => "__builtin_lasx_xvhaddw_wu_hu",
+    "llvm.loongarch.lasx.xvhsubw.d.w" => "__builtin_lasx_xvhsubw_d_w",
+    "llvm.loongarch.lasx.xvhsubw.du.wu" => "__builtin_lasx_xvhsubw_du_wu",
+    "llvm.loongarch.lasx.xvhsubw.h.b" => "__builtin_lasx_xvhsubw_h_b",
+    "llvm.loongarch.lasx.xvhsubw.hu.bu" => "__builtin_lasx_xvhsubw_hu_bu",
+    "llvm.loongarch.lasx.xvhsubw.q.d" => "__builtin_lasx_xvhsubw_q_d",
+    "llvm.loongarch.lasx.xvhsubw.qu.du" => "__builtin_lasx_xvhsubw_qu_du",
+    "llvm.loongarch.lasx.xvhsubw.w.h" => "__builtin_lasx_xvhsubw_w_h",
+    "llvm.loongarch.lasx.xvhsubw.wu.hu" => "__builtin_lasx_xvhsubw_wu_hu",
+    "llvm.loongarch.lasx.xvilvh.b" => "__builtin_lasx_xvilvh_b",
+    "llvm.loongarch.lasx.xvilvh.d" => "__builtin_lasx_xvilvh_d",
+    "llvm.loongarch.lasx.xvilvh.h" => "__builtin_lasx_xvilvh_h",
+    "llvm.loongarch.lasx.xvilvh.w" => "__builtin_lasx_xvilvh_w",
+    "llvm.loongarch.lasx.xvilvl.b" => "__builtin_lasx_xvilvl_b",
+    "llvm.loongarch.lasx.xvilvl.d" => "__builtin_lasx_xvilvl_d",
+    "llvm.loongarch.lasx.xvilvl.h" => "__builtin_lasx_xvilvl_h",
+    "llvm.loongarch.lasx.xvilvl.w" => "__builtin_lasx_xvilvl_w",
+    "llvm.loongarch.lasx.xvinsgr2vr.d" => "__builtin_lasx_xvinsgr2vr_d",
+    "llvm.loongarch.lasx.xvinsgr2vr.w" => "__builtin_lasx_xvinsgr2vr_w",
+    "llvm.loongarch.lasx.xvinsve0.d" => "__builtin_lasx_xvinsve0_d",
+    "llvm.loongarch.lasx.xvinsve0.w" => "__builtin_lasx_xvinsve0_w",
+    "llvm.loongarch.lasx.xvld" => "__builtin_lasx_xvld",
+    "llvm.loongarch.lasx.xvldi" => "__builtin_lasx_xvldi",
+    "llvm.loongarch.lasx.xvldrepl.b" => "__builtin_lasx_xvldrepl_b",
+    "llvm.loongarch.lasx.xvldrepl.d" => "__builtin_lasx_xvldrepl_d",
+    "llvm.loongarch.lasx.xvldrepl.h" => "__builtin_lasx_xvldrepl_h",
+    "llvm.loongarch.lasx.xvldrepl.w" => "__builtin_lasx_xvldrepl_w",
+    "llvm.loongarch.lasx.xvldx" => "__builtin_lasx_xvldx",
+    "llvm.loongarch.lasx.xvmadd.b" => "__builtin_lasx_xvmadd_b",
+    "llvm.loongarch.lasx.xvmadd.d" => "__builtin_lasx_xvmadd_d",
+    "llvm.loongarch.lasx.xvmadd.h" => "__builtin_lasx_xvmadd_h",
+    "llvm.loongarch.lasx.xvmadd.w" => "__builtin_lasx_xvmadd_w",
+    "llvm.loongarch.lasx.xvmaddwev.d.w" => "__builtin_lasx_xvmaddwev_d_w",
+    "llvm.loongarch.lasx.xvmaddwev.d.wu" => "__builtin_lasx_xvmaddwev_d_wu",
+    "llvm.loongarch.lasx.xvmaddwev.d.wu.w" => "__builtin_lasx_xvmaddwev_d_wu_w",
+    "llvm.loongarch.lasx.xvmaddwev.h.b" => "__builtin_lasx_xvmaddwev_h_b",
+    "llvm.loongarch.lasx.xvmaddwev.h.bu" => "__builtin_lasx_xvmaddwev_h_bu",
+    "llvm.loongarch.lasx.xvmaddwev.h.bu.b" => "__builtin_lasx_xvmaddwev_h_bu_b",
+    "llvm.loongarch.lasx.xvmaddwev.q.d" => "__builtin_lasx_xvmaddwev_q_d",
+    "llvm.loongarch.lasx.xvmaddwev.q.du" => "__builtin_lasx_xvmaddwev_q_du",
+    "llvm.loongarch.lasx.xvmaddwev.q.du.d" => "__builtin_lasx_xvmaddwev_q_du_d",
+    "llvm.loongarch.lasx.xvmaddwev.w.h" => "__builtin_lasx_xvmaddwev_w_h",
+    "llvm.loongarch.lasx.xvmaddwev.w.hu" => "__builtin_lasx_xvmaddwev_w_hu",
+    "llvm.loongarch.lasx.xvmaddwev.w.hu.h" => "__builtin_lasx_xvmaddwev_w_hu_h",
+    "llvm.loongarch.lasx.xvmaddwod.d.w" => "__builtin_lasx_xvmaddwod_d_w",
+    "llvm.loongarch.lasx.xvmaddwod.d.wu" => "__builtin_lasx_xvmaddwod_d_wu",
+    "llvm.loongarch.lasx.xvmaddwod.d.wu.w" => "__builtin_lasx_xvmaddwod_d_wu_w",
+    "llvm.loongarch.lasx.xvmaddwod.h.b" => "__builtin_lasx_xvmaddwod_h_b",
+    "llvm.loongarch.lasx.xvmaddwod.h.bu" => "__builtin_lasx_xvmaddwod_h_bu",
+    "llvm.loongarch.lasx.xvmaddwod.h.bu.b" => "__builtin_lasx_xvmaddwod_h_bu_b",
+    "llvm.loongarch.lasx.xvmaddwod.q.d" => "__builtin_lasx_xvmaddwod_q_d",
+    "llvm.loongarch.lasx.xvmaddwod.q.du" => "__builtin_lasx_xvmaddwod_q_du",
+    "llvm.loongarch.lasx.xvmaddwod.q.du.d" => "__builtin_lasx_xvmaddwod_q_du_d",
+    "llvm.loongarch.lasx.xvmaddwod.w.h" => "__builtin_lasx_xvmaddwod_w_h",
+    "llvm.loongarch.lasx.xvmaddwod.w.hu" => "__builtin_lasx_xvmaddwod_w_hu",
+    "llvm.loongarch.lasx.xvmaddwod.w.hu.h" => "__builtin_lasx_xvmaddwod_w_hu_h",
+    "llvm.loongarch.lasx.xvmax.b" => "__builtin_lasx_xvmax_b",
+    "llvm.loongarch.lasx.xvmax.bu" => "__builtin_lasx_xvmax_bu",
+    "llvm.loongarch.lasx.xvmax.d" => "__builtin_lasx_xvmax_d",
+    "llvm.loongarch.lasx.xvmax.du" => "__builtin_lasx_xvmax_du",
+    "llvm.loongarch.lasx.xvmax.h" => "__builtin_lasx_xvmax_h",
+    "llvm.loongarch.lasx.xvmax.hu" => "__builtin_lasx_xvmax_hu",
+    "llvm.loongarch.lasx.xvmax.w" => "__builtin_lasx_xvmax_w",
+    "llvm.loongarch.lasx.xvmax.wu" => "__builtin_lasx_xvmax_wu",
+    "llvm.loongarch.lasx.xvmaxi.b" => "__builtin_lasx_xvmaxi_b",
+    "llvm.loongarch.lasx.xvmaxi.bu" => "__builtin_lasx_xvmaxi_bu",
+    "llvm.loongarch.lasx.xvmaxi.d" => "__builtin_lasx_xvmaxi_d",
+    "llvm.loongarch.lasx.xvmaxi.du" => "__builtin_lasx_xvmaxi_du",
+    "llvm.loongarch.lasx.xvmaxi.h" => "__builtin_lasx_xvmaxi_h",
+    "llvm.loongarch.lasx.xvmaxi.hu" => "__builtin_lasx_xvmaxi_hu",
+    "llvm.loongarch.lasx.xvmaxi.w" => "__builtin_lasx_xvmaxi_w",
+    "llvm.loongarch.lasx.xvmaxi.wu" => "__builtin_lasx_xvmaxi_wu",
+    "llvm.loongarch.lasx.xvmin.b" => "__builtin_lasx_xvmin_b",
+    "llvm.loongarch.lasx.xvmin.bu" => "__builtin_lasx_xvmin_bu",
+    "llvm.loongarch.lasx.xvmin.d" => "__builtin_lasx_xvmin_d",
+    "llvm.loongarch.lasx.xvmin.du" => "__builtin_lasx_xvmin_du",
+    "llvm.loongarch.lasx.xvmin.h" => "__builtin_lasx_xvmin_h",
+    "llvm.loongarch.lasx.xvmin.hu" => "__builtin_lasx_xvmin_hu",
+    "llvm.loongarch.lasx.xvmin.w" => "__builtin_lasx_xvmin_w",
+    "llvm.loongarch.lasx.xvmin.wu" => "__builtin_lasx_xvmin_wu",
+    "llvm.loongarch.lasx.xvmini.b" => "__builtin_lasx_xvmini_b",
+    "llvm.loongarch.lasx.xvmini.bu" => "__builtin_lasx_xvmini_bu",
+    "llvm.loongarch.lasx.xvmini.d" => "__builtin_lasx_xvmini_d",
+    "llvm.loongarch.lasx.xvmini.du" => "__builtin_lasx_xvmini_du",
+    "llvm.loongarch.lasx.xvmini.h" => "__builtin_lasx_xvmini_h",
+    "llvm.loongarch.lasx.xvmini.hu" => "__builtin_lasx_xvmini_hu",
+    "llvm.loongarch.lasx.xvmini.w" => "__builtin_lasx_xvmini_w",
+    "llvm.loongarch.lasx.xvmini.wu" => "__builtin_lasx_xvmini_wu",
+    "llvm.loongarch.lasx.xvmod.b" => "__builtin_lasx_xvmod_b",
+    "llvm.loongarch.lasx.xvmod.bu" => "__builtin_lasx_xvmod_bu",
+    "llvm.loongarch.lasx.xvmod.d" => "__builtin_lasx_xvmod_d",
+    "llvm.loongarch.lasx.xvmod.du" => "__builtin_lasx_xvmod_du",
+    "llvm.loongarch.lasx.xvmod.h" => "__builtin_lasx_xvmod_h",
+    "llvm.loongarch.lasx.xvmod.hu" => "__builtin_lasx_xvmod_hu",
+    "llvm.loongarch.lasx.xvmod.w" => "__builtin_lasx_xvmod_w",
+    "llvm.loongarch.lasx.xvmod.wu" => "__builtin_lasx_xvmod_wu",
+    "llvm.loongarch.lasx.xvmskgez.b" => "__builtin_lasx_xvmskgez_b",
+    "llvm.loongarch.lasx.xvmskltz.b" => "__builtin_lasx_xvmskltz_b",
+    "llvm.loongarch.lasx.xvmskltz.d" => "__builtin_lasx_xvmskltz_d",
+    "llvm.loongarch.lasx.xvmskltz.h" => "__builtin_lasx_xvmskltz_h",
+    "llvm.loongarch.lasx.xvmskltz.w" => "__builtin_lasx_xvmskltz_w",
+    "llvm.loongarch.lasx.xvmsknz.b" => "__builtin_lasx_xvmsknz_b",
+    "llvm.loongarch.lasx.xvmsub.b" => "__builtin_lasx_xvmsub_b",
+    "llvm.loongarch.lasx.xvmsub.d" => "__builtin_lasx_xvmsub_d",
+    "llvm.loongarch.lasx.xvmsub.h" => "__builtin_lasx_xvmsub_h",
+    "llvm.loongarch.lasx.xvmsub.w" => "__builtin_lasx_xvmsub_w",
+    "llvm.loongarch.lasx.xvmuh.b" => "__builtin_lasx_xvmuh_b",
+    "llvm.loongarch.lasx.xvmuh.bu" => "__builtin_lasx_xvmuh_bu",
+    "llvm.loongarch.lasx.xvmuh.d" => "__builtin_lasx_xvmuh_d",
+    "llvm.loongarch.lasx.xvmuh.du" => "__builtin_lasx_xvmuh_du",
+    "llvm.loongarch.lasx.xvmuh.h" => "__builtin_lasx_xvmuh_h",
+    "llvm.loongarch.lasx.xvmuh.hu" => "__builtin_lasx_xvmuh_hu",
+    "llvm.loongarch.lasx.xvmuh.w" => "__builtin_lasx_xvmuh_w",
+    "llvm.loongarch.lasx.xvmuh.wu" => "__builtin_lasx_xvmuh_wu",
+    "llvm.loongarch.lasx.xvmul.b" => "__builtin_lasx_xvmul_b",
+    "llvm.loongarch.lasx.xvmul.d" => "__builtin_lasx_xvmul_d",
+    "llvm.loongarch.lasx.xvmul.h" => "__builtin_lasx_xvmul_h",
+    "llvm.loongarch.lasx.xvmul.w" => "__builtin_lasx_xvmul_w",
+    "llvm.loongarch.lasx.xvmulwev.d.w" => "__builtin_lasx_xvmulwev_d_w",
+    "llvm.loongarch.lasx.xvmulwev.d.wu" => "__builtin_lasx_xvmulwev_d_wu",
+    "llvm.loongarch.lasx.xvmulwev.d.wu.w" => "__builtin_lasx_xvmulwev_d_wu_w",
+    "llvm.loongarch.lasx.xvmulwev.h.b" => "__builtin_lasx_xvmulwev_h_b",
+    "llvm.loongarch.lasx.xvmulwev.h.bu" => "__builtin_lasx_xvmulwev_h_bu",
+    "llvm.loongarch.lasx.xvmulwev.h.bu.b" => "__builtin_lasx_xvmulwev_h_bu_b",
+    "llvm.loongarch.lasx.xvmulwev.q.d" => "__builtin_lasx_xvmulwev_q_d",
+    "llvm.loongarch.lasx.xvmulwev.q.du" => "__builtin_lasx_xvmulwev_q_du",
+    "llvm.loongarch.lasx.xvmulwev.q.du.d" => "__builtin_lasx_xvmulwev_q_du_d",
+    "llvm.loongarch.lasx.xvmulwev.w.h" => "__builtin_lasx_xvmulwev_w_h",
+    "llvm.loongarch.lasx.xvmulwev.w.hu" => "__builtin_lasx_xvmulwev_w_hu",
+    "llvm.loongarch.lasx.xvmulwev.w.hu.h" => "__builtin_lasx_xvmulwev_w_hu_h",
+    "llvm.loongarch.lasx.xvmulwod.d.w" => "__builtin_lasx_xvmulwod_d_w",
+    "llvm.loongarch.lasx.xvmulwod.d.wu" => "__builtin_lasx_xvmulwod_d_wu",
+    "llvm.loongarch.lasx.xvmulwod.d.wu.w" => "__builtin_lasx_xvmulwod_d_wu_w",
+    "llvm.loongarch.lasx.xvmulwod.h.b" => "__builtin_lasx_xvmulwod_h_b",
+    "llvm.loongarch.lasx.xvmulwod.h.bu" => "__builtin_lasx_xvmulwod_h_bu",
+    "llvm.loongarch.lasx.xvmulwod.h.bu.b" => "__builtin_lasx_xvmulwod_h_bu_b",
+    "llvm.loongarch.lasx.xvmulwod.q.d" => "__builtin_lasx_xvmulwod_q_d",
+    "llvm.loongarch.lasx.xvmulwod.q.du" => "__builtin_lasx_xvmulwod_q_du",
+    "llvm.loongarch.lasx.xvmulwod.q.du.d" => "__builtin_lasx_xvmulwod_q_du_d",
+    "llvm.loongarch.lasx.xvmulwod.w.h" => "__builtin_lasx_xvmulwod_w_h",
+    "llvm.loongarch.lasx.xvmulwod.w.hu" => "__builtin_lasx_xvmulwod_w_hu",
+    "llvm.loongarch.lasx.xvmulwod.w.hu.h" => "__builtin_lasx_xvmulwod_w_hu_h",
+    "llvm.loongarch.lasx.xvneg.b" => "__builtin_lasx_xvneg_b",
+    "llvm.loongarch.lasx.xvneg.d" => "__builtin_lasx_xvneg_d",
+    "llvm.loongarch.lasx.xvneg.h" => "__builtin_lasx_xvneg_h",
+    "llvm.loongarch.lasx.xvneg.w" => "__builtin_lasx_xvneg_w",
+    "llvm.loongarch.lasx.xvnor.v" => "__builtin_lasx_xvnor_v",
+    "llvm.loongarch.lasx.xvnori.b" => "__builtin_lasx_xvnori_b",
+    "llvm.loongarch.lasx.xvor.v" => "__builtin_lasx_xvor_v",
+    "llvm.loongarch.lasx.xvori.b" => "__builtin_lasx_xvori_b",
+    "llvm.loongarch.lasx.xvorn.v" => "__builtin_lasx_xvorn_v",
+    "llvm.loongarch.lasx.xvpackev.b" => "__builtin_lasx_xvpackev_b",
+    "llvm.loongarch.lasx.xvpackev.d" => "__builtin_lasx_xvpackev_d",
+    "llvm.loongarch.lasx.xvpackev.h" => "__builtin_lasx_xvpackev_h",
+    "llvm.loongarch.lasx.xvpackev.w" => "__builtin_lasx_xvpackev_w",
+    "llvm.loongarch.lasx.xvpackod.b" => "__builtin_lasx_xvpackod_b",
+    "llvm.loongarch.lasx.xvpackod.d" => "__builtin_lasx_xvpackod_d",
+    "llvm.loongarch.lasx.xvpackod.h" => "__builtin_lasx_xvpackod_h",
+    "llvm.loongarch.lasx.xvpackod.w" => "__builtin_lasx_xvpackod_w",
+    "llvm.loongarch.lasx.xvpcnt.b" => "__builtin_lasx_xvpcnt_b",
+    "llvm.loongarch.lasx.xvpcnt.d" => "__builtin_lasx_xvpcnt_d",
+    "llvm.loongarch.lasx.xvpcnt.h" => "__builtin_lasx_xvpcnt_h",
+    "llvm.loongarch.lasx.xvpcnt.w" => "__builtin_lasx_xvpcnt_w",
+    "llvm.loongarch.lasx.xvperm.w" => "__builtin_lasx_xvperm_w",
+    "llvm.loongarch.lasx.xvpermi.d" => "__builtin_lasx_xvpermi_d",
+    "llvm.loongarch.lasx.xvpermi.q" => "__builtin_lasx_xvpermi_q",
+    "llvm.loongarch.lasx.xvpermi.w" => "__builtin_lasx_xvpermi_w",
+    "llvm.loongarch.lasx.xvpickev.b" => "__builtin_lasx_xvpickev_b",
+    "llvm.loongarch.lasx.xvpickev.d" => "__builtin_lasx_xvpickev_d",
+    "llvm.loongarch.lasx.xvpickev.h" => "__builtin_lasx_xvpickev_h",
+    "llvm.loongarch.lasx.xvpickev.w" => "__builtin_lasx_xvpickev_w",
+    "llvm.loongarch.lasx.xvpickod.b" => "__builtin_lasx_xvpickod_b",
+    "llvm.loongarch.lasx.xvpickod.d" => "__builtin_lasx_xvpickod_d",
+    "llvm.loongarch.lasx.xvpickod.h" => "__builtin_lasx_xvpickod_h",
+    "llvm.loongarch.lasx.xvpickod.w" => "__builtin_lasx_xvpickod_w",
+    "llvm.loongarch.lasx.xvpickve.d" => "__builtin_lasx_xvpickve_d",
+    "llvm.loongarch.lasx.xvpickve.d.f" => "__builtin_lasx_xvpickve_d_f",
+    "llvm.loongarch.lasx.xvpickve.w" => "__builtin_lasx_xvpickve_w",
+    "llvm.loongarch.lasx.xvpickve.w.f" => "__builtin_lasx_xvpickve_w_f",
+    "llvm.loongarch.lasx.xvpickve2gr.d" => "__builtin_lasx_xvpickve2gr_d",
+    "llvm.loongarch.lasx.xvpickve2gr.du" => "__builtin_lasx_xvpickve2gr_du",
+    "llvm.loongarch.lasx.xvpickve2gr.w" => "__builtin_lasx_xvpickve2gr_w",
+    "llvm.loongarch.lasx.xvpickve2gr.wu" => "__builtin_lasx_xvpickve2gr_wu",
+    "llvm.loongarch.lasx.xvrepl128vei.b" => "__builtin_lasx_xvrepl128vei_b",
+    "llvm.loongarch.lasx.xvrepl128vei.d" => "__builtin_lasx_xvrepl128vei_d",
+    "llvm.loongarch.lasx.xvrepl128vei.h" => "__builtin_lasx_xvrepl128vei_h",
+    "llvm.loongarch.lasx.xvrepl128vei.w" => "__builtin_lasx_xvrepl128vei_w",
+    "llvm.loongarch.lasx.xvreplgr2vr.b" => "__builtin_lasx_xvreplgr2vr_b",
+    "llvm.loongarch.lasx.xvreplgr2vr.d" => "__builtin_lasx_xvreplgr2vr_d",
+    "llvm.loongarch.lasx.xvreplgr2vr.h" => "__builtin_lasx_xvreplgr2vr_h",
+    "llvm.loongarch.lasx.xvreplgr2vr.w" => "__builtin_lasx_xvreplgr2vr_w",
+    "llvm.loongarch.lasx.xvrepli.b" => "__builtin_lasx_xvrepli_b",
+    "llvm.loongarch.lasx.xvrepli.d" => "__builtin_lasx_xvrepli_d",
+    "llvm.loongarch.lasx.xvrepli.h" => "__builtin_lasx_xvrepli_h",
+    "llvm.loongarch.lasx.xvrepli.w" => "__builtin_lasx_xvrepli_w",
+    "llvm.loongarch.lasx.xvreplve.b" => "__builtin_lasx_xvreplve_b",
+    "llvm.loongarch.lasx.xvreplve.d" => "__builtin_lasx_xvreplve_d",
+    "llvm.loongarch.lasx.xvreplve.h" => "__builtin_lasx_xvreplve_h",
+    "llvm.loongarch.lasx.xvreplve.w" => "__builtin_lasx_xvreplve_w",
+    "llvm.loongarch.lasx.xvreplve0.b" => "__builtin_lasx_xvreplve0_b",
+    "llvm.loongarch.lasx.xvreplve0.d" => "__builtin_lasx_xvreplve0_d",
+    "llvm.loongarch.lasx.xvreplve0.h" => "__builtin_lasx_xvreplve0_h",
+    "llvm.loongarch.lasx.xvreplve0.q" => "__builtin_lasx_xvreplve0_q",
+    "llvm.loongarch.lasx.xvreplve0.w" => "__builtin_lasx_xvreplve0_w",
+    "llvm.loongarch.lasx.xvrotr.b" => "__builtin_lasx_xvrotr_b",
+    "llvm.loongarch.lasx.xvrotr.d" => "__builtin_lasx_xvrotr_d",
+    "llvm.loongarch.lasx.xvrotr.h" => "__builtin_lasx_xvrotr_h",
+    "llvm.loongarch.lasx.xvrotr.w" => "__builtin_lasx_xvrotr_w",
+    "llvm.loongarch.lasx.xvrotri.b" => "__builtin_lasx_xvrotri_b",
+    "llvm.loongarch.lasx.xvrotri.d" => "__builtin_lasx_xvrotri_d",
+    "llvm.loongarch.lasx.xvrotri.h" => "__builtin_lasx_xvrotri_h",
+    "llvm.loongarch.lasx.xvrotri.w" => "__builtin_lasx_xvrotri_w",
+    "llvm.loongarch.lasx.xvsadd.b" => "__builtin_lasx_xvsadd_b",
+    "llvm.loongarch.lasx.xvsadd.bu" => "__builtin_lasx_xvsadd_bu",
+    "llvm.loongarch.lasx.xvsadd.d" => "__builtin_lasx_xvsadd_d",
+    "llvm.loongarch.lasx.xvsadd.du" => "__builtin_lasx_xvsadd_du",
+    "llvm.loongarch.lasx.xvsadd.h" => "__builtin_lasx_xvsadd_h",
+    "llvm.loongarch.lasx.xvsadd.hu" => "__builtin_lasx_xvsadd_hu",
+    "llvm.loongarch.lasx.xvsadd.w" => "__builtin_lasx_xvsadd_w",
+    "llvm.loongarch.lasx.xvsadd.wu" => "__builtin_lasx_xvsadd_wu",
+    "llvm.loongarch.lasx.xvsat.b" => "__builtin_lasx_xvsat_b",
+    "llvm.loongarch.lasx.xvsat.bu" => "__builtin_lasx_xvsat_bu",
+    "llvm.loongarch.lasx.xvsat.d" => "__builtin_lasx_xvsat_d",
+    "llvm.loongarch.lasx.xvsat.du" => "__builtin_lasx_xvsat_du",
+    "llvm.loongarch.lasx.xvsat.h" => "__builtin_lasx_xvsat_h",
+    "llvm.loongarch.lasx.xvsat.hu" => "__builtin_lasx_xvsat_hu",
+    "llvm.loongarch.lasx.xvsat.w" => "__builtin_lasx_xvsat_w",
+    "llvm.loongarch.lasx.xvsat.wu" => "__builtin_lasx_xvsat_wu",
+    "llvm.loongarch.lasx.xvseq.b" => "__builtin_lasx_xvseq_b",
+    "llvm.loongarch.lasx.xvseq.d" => "__builtin_lasx_xvseq_d",
+    "llvm.loongarch.lasx.xvseq.h" => "__builtin_lasx_xvseq_h",
+    "llvm.loongarch.lasx.xvseq.w" => "__builtin_lasx_xvseq_w",
+    "llvm.loongarch.lasx.xvseqi.b" => "__builtin_lasx_xvseqi_b",
+    "llvm.loongarch.lasx.xvseqi.d" => "__builtin_lasx_xvseqi_d",
+    "llvm.loongarch.lasx.xvseqi.h" => "__builtin_lasx_xvseqi_h",
+    "llvm.loongarch.lasx.xvseqi.w" => "__builtin_lasx_xvseqi_w",
+    "llvm.loongarch.lasx.xvshuf.b" => "__builtin_lasx_xvshuf_b",
+    "llvm.loongarch.lasx.xvshuf.d" => "__builtin_lasx_xvshuf_d",
+    "llvm.loongarch.lasx.xvshuf.h" => "__builtin_lasx_xvshuf_h",
+    "llvm.loongarch.lasx.xvshuf.w" => "__builtin_lasx_xvshuf_w",
+    "llvm.loongarch.lasx.xvshuf4i.b" => "__builtin_lasx_xvshuf4i_b",
+    "llvm.loongarch.lasx.xvshuf4i.d" => "__builtin_lasx_xvshuf4i_d",
+    "llvm.loongarch.lasx.xvshuf4i.h" => "__builtin_lasx_xvshuf4i_h",
+    "llvm.loongarch.lasx.xvshuf4i.w" => "__builtin_lasx_xvshuf4i_w",
+    "llvm.loongarch.lasx.xvsigncov.b" => "__builtin_lasx_xvsigncov_b",
+    "llvm.loongarch.lasx.xvsigncov.d" => "__builtin_lasx_xvsigncov_d",
+    "llvm.loongarch.lasx.xvsigncov.h" => "__builtin_lasx_xvsigncov_h",
+    "llvm.loongarch.lasx.xvsigncov.w" => "__builtin_lasx_xvsigncov_w",
+    "llvm.loongarch.lasx.xvsle.b" => "__builtin_lasx_xvsle_b",
+    "llvm.loongarch.lasx.xvsle.bu" => "__builtin_lasx_xvsle_bu",
+    "llvm.loongarch.lasx.xvsle.d" => "__builtin_lasx_xvsle_d",
+    "llvm.loongarch.lasx.xvsle.du" => "__builtin_lasx_xvsle_du",
+    "llvm.loongarch.lasx.xvsle.h" => "__builtin_lasx_xvsle_h",
+    "llvm.loongarch.lasx.xvsle.hu" => "__builtin_lasx_xvsle_hu",
+    "llvm.loongarch.lasx.xvsle.w" => "__builtin_lasx_xvsle_w",
+    "llvm.loongarch.lasx.xvsle.wu" => "__builtin_lasx_xvsle_wu",
+    "llvm.loongarch.lasx.xvslei.b" => "__builtin_lasx_xvslei_b",
+    "llvm.loongarch.lasx.xvslei.bu" => "__builtin_lasx_xvslei_bu",
+    "llvm.loongarch.lasx.xvslei.d" => "__builtin_lasx_xvslei_d",
+    "llvm.loongarch.lasx.xvslei.du" => "__builtin_lasx_xvslei_du",
+    "llvm.loongarch.lasx.xvslei.h" => "__builtin_lasx_xvslei_h",
+    "llvm.loongarch.lasx.xvslei.hu" => "__builtin_lasx_xvslei_hu",
+    "llvm.loongarch.lasx.xvslei.w" => "__builtin_lasx_xvslei_w",
+    "llvm.loongarch.lasx.xvslei.wu" => "__builtin_lasx_xvslei_wu",
+    "llvm.loongarch.lasx.xvsll.b" => "__builtin_lasx_xvsll_b",
+    "llvm.loongarch.lasx.xvsll.d" => "__builtin_lasx_xvsll_d",
+    "llvm.loongarch.lasx.xvsll.h" => "__builtin_lasx_xvsll_h",
+    "llvm.loongarch.lasx.xvsll.w" => "__builtin_lasx_xvsll_w",
+    "llvm.loongarch.lasx.xvslli.b" => "__builtin_lasx_xvslli_b",
+    "llvm.loongarch.lasx.xvslli.d" => "__builtin_lasx_xvslli_d",
+    "llvm.loongarch.lasx.xvslli.h" => "__builtin_lasx_xvslli_h",
+    "llvm.loongarch.lasx.xvslli.w" => "__builtin_lasx_xvslli_w",
+    "llvm.loongarch.lasx.xvsllwil.d.w" => "__builtin_lasx_xvsllwil_d_w",
+    "llvm.loongarch.lasx.xvsllwil.du.wu" => "__builtin_lasx_xvsllwil_du_wu",
+    "llvm.loongarch.lasx.xvsllwil.h.b" => "__builtin_lasx_xvsllwil_h_b",
+    "llvm.loongarch.lasx.xvsllwil.hu.bu" => "__builtin_lasx_xvsllwil_hu_bu",
+    "llvm.loongarch.lasx.xvsllwil.w.h" => "__builtin_lasx_xvsllwil_w_h",
+    "llvm.loongarch.lasx.xvsllwil.wu.hu" => "__builtin_lasx_xvsllwil_wu_hu",
+    "llvm.loongarch.lasx.xvslt.b" => "__builtin_lasx_xvslt_b",
+    "llvm.loongarch.lasx.xvslt.bu" => "__builtin_lasx_xvslt_bu",
+    "llvm.loongarch.lasx.xvslt.d" => "__builtin_lasx_xvslt_d",
+    "llvm.loongarch.lasx.xvslt.du" => "__builtin_lasx_xvslt_du",
+    "llvm.loongarch.lasx.xvslt.h" => "__builtin_lasx_xvslt_h",
+    "llvm.loongarch.lasx.xvslt.hu" => "__builtin_lasx_xvslt_hu",
+    "llvm.loongarch.lasx.xvslt.w" => "__builtin_lasx_xvslt_w",
+    "llvm.loongarch.lasx.xvslt.wu" => "__builtin_lasx_xvslt_wu",
+    "llvm.loongarch.lasx.xvslti.b" => "__builtin_lasx_xvslti_b",
+    "llvm.loongarch.lasx.xvslti.bu" => "__builtin_lasx_xvslti_bu",
+    "llvm.loongarch.lasx.xvslti.d" => "__builtin_lasx_xvslti_d",
+    "llvm.loongarch.lasx.xvslti.du" => "__builtin_lasx_xvslti_du",
+    "llvm.loongarch.lasx.xvslti.h" => "__builtin_lasx_xvslti_h",
+    "llvm.loongarch.lasx.xvslti.hu" => "__builtin_lasx_xvslti_hu",
+    "llvm.loongarch.lasx.xvslti.w" => "__builtin_lasx_xvslti_w",
+    "llvm.loongarch.lasx.xvslti.wu" => "__builtin_lasx_xvslti_wu",
+    "llvm.loongarch.lasx.xvsra.b" => "__builtin_lasx_xvsra_b",
+    "llvm.loongarch.lasx.xvsra.d" => "__builtin_lasx_xvsra_d",
+    "llvm.loongarch.lasx.xvsra.h" => "__builtin_lasx_xvsra_h",
+    "llvm.loongarch.lasx.xvsra.w" => "__builtin_lasx_xvsra_w",
+    "llvm.loongarch.lasx.xvsrai.b" => "__builtin_lasx_xvsrai_b",
+    "llvm.loongarch.lasx.xvsrai.d" => "__builtin_lasx_xvsrai_d",
+    "llvm.loongarch.lasx.xvsrai.h" => "__builtin_lasx_xvsrai_h",
+    "llvm.loongarch.lasx.xvsrai.w" => "__builtin_lasx_xvsrai_w",
+    "llvm.loongarch.lasx.xvsran.b.h" => "__builtin_lasx_xvsran_b_h",
+    "llvm.loongarch.lasx.xvsran.h.w" => "__builtin_lasx_xvsran_h_w",
+    "llvm.loongarch.lasx.xvsran.w.d" => "__builtin_lasx_xvsran_w_d",
+    "llvm.loongarch.lasx.xvsrani.b.h" => "__builtin_lasx_xvsrani_b_h",
+    "llvm.loongarch.lasx.xvsrani.d.q" => "__builtin_lasx_xvsrani_d_q",
+    "llvm.loongarch.lasx.xvsrani.h.w" => "__builtin_lasx_xvsrani_h_w",
+    "llvm.loongarch.lasx.xvsrani.w.d" => "__builtin_lasx_xvsrani_w_d",
+    "llvm.loongarch.lasx.xvsrar.b" => "__builtin_lasx_xvsrar_b",
+    "llvm.loongarch.lasx.xvsrar.d" => "__builtin_lasx_xvsrar_d",
+    "llvm.loongarch.lasx.xvsrar.h" => "__builtin_lasx_xvsrar_h",
+    "llvm.loongarch.lasx.xvsrar.w" => "__builtin_lasx_xvsrar_w",
+    "llvm.loongarch.lasx.xvsrari.b" => "__builtin_lasx_xvsrari_b",
+    "llvm.loongarch.lasx.xvsrari.d" => "__builtin_lasx_xvsrari_d",
+    "llvm.loongarch.lasx.xvsrari.h" => "__builtin_lasx_xvsrari_h",
+    "llvm.loongarch.lasx.xvsrari.w" => "__builtin_lasx_xvsrari_w",
+    "llvm.loongarch.lasx.xvsrarn.b.h" => "__builtin_lasx_xvsrarn_b_h",
+    "llvm.loongarch.lasx.xvsrarn.h.w" => "__builtin_lasx_xvsrarn_h_w",
+    "llvm.loongarch.lasx.xvsrarn.w.d" => "__builtin_lasx_xvsrarn_w_d",
+    "llvm.loongarch.lasx.xvsrarni.b.h" => "__builtin_lasx_xvsrarni_b_h",
+    "llvm.loongarch.lasx.xvsrarni.d.q" => "__builtin_lasx_xvsrarni_d_q",
+    "llvm.loongarch.lasx.xvsrarni.h.w" => "__builtin_lasx_xvsrarni_h_w",
+    "llvm.loongarch.lasx.xvsrarni.w.d" => "__builtin_lasx_xvsrarni_w_d",
+    "llvm.loongarch.lasx.xvsrl.b" => "__builtin_lasx_xvsrl_b",
+    "llvm.loongarch.lasx.xvsrl.d" => "__builtin_lasx_xvsrl_d",
+    "llvm.loongarch.lasx.xvsrl.h" => "__builtin_lasx_xvsrl_h",
+    "llvm.loongarch.lasx.xvsrl.w" => "__builtin_lasx_xvsrl_w",
+    "llvm.loongarch.lasx.xvsrli.b" => "__builtin_lasx_xvsrli_b",
+    "llvm.loongarch.lasx.xvsrli.d" => "__builtin_lasx_xvsrli_d",
+    "llvm.loongarch.lasx.xvsrli.h" => "__builtin_lasx_xvsrli_h",
+    "llvm.loongarch.lasx.xvsrli.w" => "__builtin_lasx_xvsrli_w",
+    "llvm.loongarch.lasx.xvsrln.b.h" => "__builtin_lasx_xvsrln_b_h",
+    "llvm.loongarch.lasx.xvsrln.h.w" => "__builtin_lasx_xvsrln_h_w",
+    "llvm.loongarch.lasx.xvsrln.w.d" => "__builtin_lasx_xvsrln_w_d",
+    "llvm.loongarch.lasx.xvsrlni.b.h" => "__builtin_lasx_xvsrlni_b_h",
+    "llvm.loongarch.lasx.xvsrlni.d.q" => "__builtin_lasx_xvsrlni_d_q",
+    "llvm.loongarch.lasx.xvsrlni.h.w" => "__builtin_lasx_xvsrlni_h_w",
+    "llvm.loongarch.lasx.xvsrlni.w.d" => "__builtin_lasx_xvsrlni_w_d",
+    "llvm.loongarch.lasx.xvsrlr.b" => "__builtin_lasx_xvsrlr_b",
+    "llvm.loongarch.lasx.xvsrlr.d" => "__builtin_lasx_xvsrlr_d",
+    "llvm.loongarch.lasx.xvsrlr.h" => "__builtin_lasx_xvsrlr_h",
+    "llvm.loongarch.lasx.xvsrlr.w" => "__builtin_lasx_xvsrlr_w",
+    "llvm.loongarch.lasx.xvsrlri.b" => "__builtin_lasx_xvsrlri_b",
+    "llvm.loongarch.lasx.xvsrlri.d" => "__builtin_lasx_xvsrlri_d",
+    "llvm.loongarch.lasx.xvsrlri.h" => "__builtin_lasx_xvsrlri_h",
+    "llvm.loongarch.lasx.xvsrlri.w" => "__builtin_lasx_xvsrlri_w",
+    "llvm.loongarch.lasx.xvsrlrn.b.h" => "__builtin_lasx_xvsrlrn_b_h",
+    "llvm.loongarch.lasx.xvsrlrn.h.w" => "__builtin_lasx_xvsrlrn_h_w",
+    "llvm.loongarch.lasx.xvsrlrn.w.d" => "__builtin_lasx_xvsrlrn_w_d",
+    "llvm.loongarch.lasx.xvsrlrni.b.h" => "__builtin_lasx_xvsrlrni_b_h",
+    "llvm.loongarch.lasx.xvsrlrni.d.q" => "__builtin_lasx_xvsrlrni_d_q",
+    "llvm.loongarch.lasx.xvsrlrni.h.w" => "__builtin_lasx_xvsrlrni_h_w",
+    "llvm.loongarch.lasx.xvsrlrni.w.d" => "__builtin_lasx_xvsrlrni_w_d",
+    "llvm.loongarch.lasx.xvssran.b.h" => "__builtin_lasx_xvssran_b_h",
+    "llvm.loongarch.lasx.xvssran.bu.h" => "__builtin_lasx_xvssran_bu_h",
+    "llvm.loongarch.lasx.xvssran.h.w" => "__builtin_lasx_xvssran_h_w",
+    "llvm.loongarch.lasx.xvssran.hu.w" => "__builtin_lasx_xvssran_hu_w",
+    "llvm.loongarch.lasx.xvssran.w.d" => "__builtin_lasx_xvssran_w_d",
+    "llvm.loongarch.lasx.xvssran.wu.d" => "__builtin_lasx_xvssran_wu_d",
+    "llvm.loongarch.lasx.xvssrani.b.h" => "__builtin_lasx_xvssrani_b_h",
+    "llvm.loongarch.lasx.xvssrani.bu.h" => "__builtin_lasx_xvssrani_bu_h",
+    "llvm.loongarch.lasx.xvssrani.d.q" => "__builtin_lasx_xvssrani_d_q",
+    "llvm.loongarch.lasx.xvssrani.du.q" => "__builtin_lasx_xvssrani_du_q",
+    "llvm.loongarch.lasx.xvssrani.h.w" => "__builtin_lasx_xvssrani_h_w",
+    "llvm.loongarch.lasx.xvssrani.hu.w" => "__builtin_lasx_xvssrani_hu_w",
+    "llvm.loongarch.lasx.xvssrani.w.d" => "__builtin_lasx_xvssrani_w_d",
+    "llvm.loongarch.lasx.xvssrani.wu.d" => "__builtin_lasx_xvssrani_wu_d",
+    "llvm.loongarch.lasx.xvssrarn.b.h" => "__builtin_lasx_xvssrarn_b_h",
+    "llvm.loongarch.lasx.xvssrarn.bu.h" => "__builtin_lasx_xvssrarn_bu_h",
+    "llvm.loongarch.lasx.xvssrarn.h.w" => "__builtin_lasx_xvssrarn_h_w",
+    "llvm.loongarch.lasx.xvssrarn.hu.w" => "__builtin_lasx_xvssrarn_hu_w",
+    "llvm.loongarch.lasx.xvssrarn.w.d" => "__builtin_lasx_xvssrarn_w_d",
+    "llvm.loongarch.lasx.xvssrarn.wu.d" => "__builtin_lasx_xvssrarn_wu_d",
+    "llvm.loongarch.lasx.xvssrarni.b.h" => "__builtin_lasx_xvssrarni_b_h",
+    "llvm.loongarch.lasx.xvssrarni.bu.h" => "__builtin_lasx_xvssrarni_bu_h",
+    "llvm.loongarch.lasx.xvssrarni.d.q" => "__builtin_lasx_xvssrarni_d_q",
+    "llvm.loongarch.lasx.xvssrarni.du.q" => "__builtin_lasx_xvssrarni_du_q",
+    "llvm.loongarch.lasx.xvssrarni.h.w" => "__builtin_lasx_xvssrarni_h_w",
+    "llvm.loongarch.lasx.xvssrarni.hu.w" => "__builtin_lasx_xvssrarni_hu_w",
+    "llvm.loongarch.lasx.xvssrarni.w.d" => "__builtin_lasx_xvssrarni_w_d",
+    "llvm.loongarch.lasx.xvssrarni.wu.d" => "__builtin_lasx_xvssrarni_wu_d",
+    "llvm.loongarch.lasx.xvssrln.b.h" => "__builtin_lasx_xvssrln_b_h",
+    "llvm.loongarch.lasx.xvssrln.bu.h" => "__builtin_lasx_xvssrln_bu_h",
+    "llvm.loongarch.lasx.xvssrln.h.w" => "__builtin_lasx_xvssrln_h_w",
+    "llvm.loongarch.lasx.xvssrln.hu.w" => "__builtin_lasx_xvssrln_hu_w",
+    "llvm.loongarch.lasx.xvssrln.w.d" => "__builtin_lasx_xvssrln_w_d",
+    "llvm.loongarch.lasx.xvssrln.wu.d" => "__builtin_lasx_xvssrln_wu_d",
+    "llvm.loongarch.lasx.xvssrlni.b.h" => "__builtin_lasx_xvssrlni_b_h",
+    "llvm.loongarch.lasx.xvssrlni.bu.h" => "__builtin_lasx_xvssrlni_bu_h",
+    "llvm.loongarch.lasx.xvssrlni.d.q" => "__builtin_lasx_xvssrlni_d_q",
+    "llvm.loongarch.lasx.xvssrlni.du.q" => "__builtin_lasx_xvssrlni_du_q",
+    "llvm.loongarch.lasx.xvssrlni.h.w" => "__builtin_lasx_xvssrlni_h_w",
+    "llvm.loongarch.lasx.xvssrlni.hu.w" => "__builtin_lasx_xvssrlni_hu_w",
+    "llvm.loongarch.lasx.xvssrlni.w.d" => "__builtin_lasx_xvssrlni_w_d",
+    "llvm.loongarch.lasx.xvssrlni.wu.d" => "__builtin_lasx_xvssrlni_wu_d",
+    "llvm.loongarch.lasx.xvssrlrn.b.h" => "__builtin_lasx_xvssrlrn_b_h",
+    "llvm.loongarch.lasx.xvssrlrn.bu.h" => "__builtin_lasx_xvssrlrn_bu_h",
+    "llvm.loongarch.lasx.xvssrlrn.h.w" => "__builtin_lasx_xvssrlrn_h_w",
+    "llvm.loongarch.lasx.xvssrlrn.hu.w" => "__builtin_lasx_xvssrlrn_hu_w",
+    "llvm.loongarch.lasx.xvssrlrn.w.d" => "__builtin_lasx_xvssrlrn_w_d",
+    "llvm.loongarch.lasx.xvssrlrn.wu.d" => "__builtin_lasx_xvssrlrn_wu_d",
+    "llvm.loongarch.lasx.xvssrlrni.b.h" => "__builtin_lasx_xvssrlrni_b_h",
+    "llvm.loongarch.lasx.xvssrlrni.bu.h" => "__builtin_lasx_xvssrlrni_bu_h",
+    "llvm.loongarch.lasx.xvssrlrni.d.q" => "__builtin_lasx_xvssrlrni_d_q",
+    "llvm.loongarch.lasx.xvssrlrni.du.q" => "__builtin_lasx_xvssrlrni_du_q",
+    "llvm.loongarch.lasx.xvssrlrni.h.w" => "__builtin_lasx_xvssrlrni_h_w",
+    "llvm.loongarch.lasx.xvssrlrni.hu.w" => "__builtin_lasx_xvssrlrni_hu_w",
+    "llvm.loongarch.lasx.xvssrlrni.w.d" => "__builtin_lasx_xvssrlrni_w_d",
+    "llvm.loongarch.lasx.xvssrlrni.wu.d" => "__builtin_lasx_xvssrlrni_wu_d",
+    "llvm.loongarch.lasx.xvssub.b" => "__builtin_lasx_xvssub_b",
+    "llvm.loongarch.lasx.xvssub.bu" => "__builtin_lasx_xvssub_bu",
+    "llvm.loongarch.lasx.xvssub.d" => "__builtin_lasx_xvssub_d",
+    "llvm.loongarch.lasx.xvssub.du" => "__builtin_lasx_xvssub_du",
+    "llvm.loongarch.lasx.xvssub.h" => "__builtin_lasx_xvssub_h",
+    "llvm.loongarch.lasx.xvssub.hu" => "__builtin_lasx_xvssub_hu",
+    "llvm.loongarch.lasx.xvssub.w" => "__builtin_lasx_xvssub_w",
+    "llvm.loongarch.lasx.xvssub.wu" => "__builtin_lasx_xvssub_wu",
+    "llvm.loongarch.lasx.xvst" => "__builtin_lasx_xvst",
+    "llvm.loongarch.lasx.xvstelm.b" => "__builtin_lasx_xvstelm_b",
+    "llvm.loongarch.lasx.xvstelm.d" => "__builtin_lasx_xvstelm_d",
+    "llvm.loongarch.lasx.xvstelm.h" => "__builtin_lasx_xvstelm_h",
+    "llvm.loongarch.lasx.xvstelm.w" => "__builtin_lasx_xvstelm_w",
+    "llvm.loongarch.lasx.xvstx" => "__builtin_lasx_xvstx",
+    "llvm.loongarch.lasx.xvsub.b" => "__builtin_lasx_xvsub_b",
+    "llvm.loongarch.lasx.xvsub.d" => "__builtin_lasx_xvsub_d",
+    "llvm.loongarch.lasx.xvsub.h" => "__builtin_lasx_xvsub_h",
+    "llvm.loongarch.lasx.xvsub.q" => "__builtin_lasx_xvsub_q",
+    "llvm.loongarch.lasx.xvsub.w" => "__builtin_lasx_xvsub_w",
+    "llvm.loongarch.lasx.xvsubi.bu" => "__builtin_lasx_xvsubi_bu",
+    "llvm.loongarch.lasx.xvsubi.du" => "__builtin_lasx_xvsubi_du",
+    "llvm.loongarch.lasx.xvsubi.hu" => "__builtin_lasx_xvsubi_hu",
+    "llvm.loongarch.lasx.xvsubi.wu" => "__builtin_lasx_xvsubi_wu",
+    "llvm.loongarch.lasx.xvsubwev.d.w" => "__builtin_lasx_xvsubwev_d_w",
+    "llvm.loongarch.lasx.xvsubwev.d.wu" => "__builtin_lasx_xvsubwev_d_wu",
+    "llvm.loongarch.lasx.xvsubwev.h.b" => "__builtin_lasx_xvsubwev_h_b",
+    "llvm.loongarch.lasx.xvsubwev.h.bu" => "__builtin_lasx_xvsubwev_h_bu",
+    "llvm.loongarch.lasx.xvsubwev.q.d" => "__builtin_lasx_xvsubwev_q_d",
+    "llvm.loongarch.lasx.xvsubwev.q.du" => "__builtin_lasx_xvsubwev_q_du",
+    "llvm.loongarch.lasx.xvsubwev.w.h" => "__builtin_lasx_xvsubwev_w_h",
+    "llvm.loongarch.lasx.xvsubwev.w.hu" => "__builtin_lasx_xvsubwev_w_hu",
+    "llvm.loongarch.lasx.xvsubwod.d.w" => "__builtin_lasx_xvsubwod_d_w",
+    "llvm.loongarch.lasx.xvsubwod.d.wu" => "__builtin_lasx_xvsubwod_d_wu",
+    "llvm.loongarch.lasx.xvsubwod.h.b" => "__builtin_lasx_xvsubwod_h_b",
+    "llvm.loongarch.lasx.xvsubwod.h.bu" => "__builtin_lasx_xvsubwod_h_bu",
+    "llvm.loongarch.lasx.xvsubwod.q.d" => "__builtin_lasx_xvsubwod_q_d",
+    "llvm.loongarch.lasx.xvsubwod.q.du" => "__builtin_lasx_xvsubwod_q_du",
+    "llvm.loongarch.lasx.xvsubwod.w.h" => "__builtin_lasx_xvsubwod_w_h",
+    "llvm.loongarch.lasx.xvsubwod.w.hu" => "__builtin_lasx_xvsubwod_w_hu",
+    "llvm.loongarch.lasx.xvxor.v" => "__builtin_lasx_xvxor_v",
+    "llvm.loongarch.lasx.xvxori.b" => "__builtin_lasx_xvxori_b",
+    "llvm.loongarch.lddir.d" => "__builtin_loongarch_lddir_d",
+    "llvm.loongarch.ldpte.d" => "__builtin_loongarch_ldpte_d",
+    "llvm.loongarch.lsx.bnz.b" => "__builtin_lsx_bnz_b",
+    "llvm.loongarch.lsx.bnz.d" => "__builtin_lsx_bnz_d",
+    "llvm.loongarch.lsx.bnz.h" => "__builtin_lsx_bnz_h",
+    "llvm.loongarch.lsx.bnz.v" => "__builtin_lsx_bnz_v",
+    "llvm.loongarch.lsx.bnz.w" => "__builtin_lsx_bnz_w",
+    "llvm.loongarch.lsx.bz.b" => "__builtin_lsx_bz_b",
+    "llvm.loongarch.lsx.bz.d" => "__builtin_lsx_bz_d",
+    "llvm.loongarch.lsx.bz.h" => "__builtin_lsx_bz_h",
+    "llvm.loongarch.lsx.bz.v" => "__builtin_lsx_bz_v",
+    "llvm.loongarch.lsx.bz.w" => "__builtin_lsx_bz_w",
+    "llvm.loongarch.lsx.vabsd.b" => "__builtin_lsx_vabsd_b",
+    "llvm.loongarch.lsx.vabsd.bu" => "__builtin_lsx_vabsd_bu",
+    "llvm.loongarch.lsx.vabsd.d" => "__builtin_lsx_vabsd_d",
+    "llvm.loongarch.lsx.vabsd.du" => "__builtin_lsx_vabsd_du",
+    "llvm.loongarch.lsx.vabsd.h" => "__builtin_lsx_vabsd_h",
+    "llvm.loongarch.lsx.vabsd.hu" => "__builtin_lsx_vabsd_hu",
+    "llvm.loongarch.lsx.vabsd.w" => "__builtin_lsx_vabsd_w",
+    "llvm.loongarch.lsx.vabsd.wu" => "__builtin_lsx_vabsd_wu",
+    "llvm.loongarch.lsx.vadd.b" => "__builtin_lsx_vadd_b",
+    "llvm.loongarch.lsx.vadd.d" => "__builtin_lsx_vadd_d",
+    "llvm.loongarch.lsx.vadd.h" => "__builtin_lsx_vadd_h",
+    "llvm.loongarch.lsx.vadd.q" => "__builtin_lsx_vadd_q",
+    "llvm.loongarch.lsx.vadd.w" => "__builtin_lsx_vadd_w",
+    "llvm.loongarch.lsx.vadda.b" => "__builtin_lsx_vadda_b",
+    "llvm.loongarch.lsx.vadda.d" => "__builtin_lsx_vadda_d",
+    "llvm.loongarch.lsx.vadda.h" => "__builtin_lsx_vadda_h",
+    "llvm.loongarch.lsx.vadda.w" => "__builtin_lsx_vadda_w",
+    "llvm.loongarch.lsx.vaddi.bu" => "__builtin_lsx_vaddi_bu",
+    "llvm.loongarch.lsx.vaddi.du" => "__builtin_lsx_vaddi_du",
+    "llvm.loongarch.lsx.vaddi.hu" => "__builtin_lsx_vaddi_hu",
+    "llvm.loongarch.lsx.vaddi.wu" => "__builtin_lsx_vaddi_wu",
+    "llvm.loongarch.lsx.vaddwev.d.w" => "__builtin_lsx_vaddwev_d_w",
+    "llvm.loongarch.lsx.vaddwev.d.wu" => "__builtin_lsx_vaddwev_d_wu",
+    "llvm.loongarch.lsx.vaddwev.d.wu.w" => "__builtin_lsx_vaddwev_d_wu_w",
+    "llvm.loongarch.lsx.vaddwev.h.b" => "__builtin_lsx_vaddwev_h_b",
+    "llvm.loongarch.lsx.vaddwev.h.bu" => "__builtin_lsx_vaddwev_h_bu",
+    "llvm.loongarch.lsx.vaddwev.h.bu.b" => "__builtin_lsx_vaddwev_h_bu_b",
+    "llvm.loongarch.lsx.vaddwev.q.d" => "__builtin_lsx_vaddwev_q_d",
+    "llvm.loongarch.lsx.vaddwev.q.du" => "__builtin_lsx_vaddwev_q_du",
+    "llvm.loongarch.lsx.vaddwev.q.du.d" => "__builtin_lsx_vaddwev_q_du_d",
+    "llvm.loongarch.lsx.vaddwev.w.h" => "__builtin_lsx_vaddwev_w_h",
+    "llvm.loongarch.lsx.vaddwev.w.hu" => "__builtin_lsx_vaddwev_w_hu",
+    "llvm.loongarch.lsx.vaddwev.w.hu.h" => "__builtin_lsx_vaddwev_w_hu_h",
+    "llvm.loongarch.lsx.vaddwod.d.w" => "__builtin_lsx_vaddwod_d_w",
+    "llvm.loongarch.lsx.vaddwod.d.wu" => "__builtin_lsx_vaddwod_d_wu",
+    "llvm.loongarch.lsx.vaddwod.d.wu.w" => "__builtin_lsx_vaddwod_d_wu_w",
+    "llvm.loongarch.lsx.vaddwod.h.b" => "__builtin_lsx_vaddwod_h_b",
+    "llvm.loongarch.lsx.vaddwod.h.bu" => "__builtin_lsx_vaddwod_h_bu",
+    "llvm.loongarch.lsx.vaddwod.h.bu.b" => "__builtin_lsx_vaddwod_h_bu_b",
+    "llvm.loongarch.lsx.vaddwod.q.d" => "__builtin_lsx_vaddwod_q_d",
+    "llvm.loongarch.lsx.vaddwod.q.du" => "__builtin_lsx_vaddwod_q_du",
+    "llvm.loongarch.lsx.vaddwod.q.du.d" => "__builtin_lsx_vaddwod_q_du_d",
+    "llvm.loongarch.lsx.vaddwod.w.h" => "__builtin_lsx_vaddwod_w_h",
+    "llvm.loongarch.lsx.vaddwod.w.hu" => "__builtin_lsx_vaddwod_w_hu",
+    "llvm.loongarch.lsx.vaddwod.w.hu.h" => "__builtin_lsx_vaddwod_w_hu_h",
+    "llvm.loongarch.lsx.vand.v" => "__builtin_lsx_vand_v",
+    "llvm.loongarch.lsx.vandi.b" => "__builtin_lsx_vandi_b",
+    "llvm.loongarch.lsx.vandn.v" => "__builtin_lsx_vandn_v",
+    "llvm.loongarch.lsx.vavg.b" => "__builtin_lsx_vavg_b",
+    "llvm.loongarch.lsx.vavg.bu" => "__builtin_lsx_vavg_bu",
+    "llvm.loongarch.lsx.vavg.d" => "__builtin_lsx_vavg_d",
+    "llvm.loongarch.lsx.vavg.du" => "__builtin_lsx_vavg_du",
+    "llvm.loongarch.lsx.vavg.h" => "__builtin_lsx_vavg_h",
+    "llvm.loongarch.lsx.vavg.hu" => "__builtin_lsx_vavg_hu",
+    "llvm.loongarch.lsx.vavg.w" => "__builtin_lsx_vavg_w",
+    "llvm.loongarch.lsx.vavg.wu" => "__builtin_lsx_vavg_wu",
+    "llvm.loongarch.lsx.vavgr.b" => "__builtin_lsx_vavgr_b",
+    "llvm.loongarch.lsx.vavgr.bu" => "__builtin_lsx_vavgr_bu",
+    "llvm.loongarch.lsx.vavgr.d" => "__builtin_lsx_vavgr_d",
+    "llvm.loongarch.lsx.vavgr.du" => "__builtin_lsx_vavgr_du",
+    "llvm.loongarch.lsx.vavgr.h" => "__builtin_lsx_vavgr_h",
+    "llvm.loongarch.lsx.vavgr.hu" => "__builtin_lsx_vavgr_hu",
+    "llvm.loongarch.lsx.vavgr.w" => "__builtin_lsx_vavgr_w",
+    "llvm.loongarch.lsx.vavgr.wu" => "__builtin_lsx_vavgr_wu",
+    "llvm.loongarch.lsx.vbitclr.b" => "__builtin_lsx_vbitclr_b",
+    "llvm.loongarch.lsx.vbitclr.d" => "__builtin_lsx_vbitclr_d",
+    "llvm.loongarch.lsx.vbitclr.h" => "__builtin_lsx_vbitclr_h",
+    "llvm.loongarch.lsx.vbitclr.w" => "__builtin_lsx_vbitclr_w",
+    "llvm.loongarch.lsx.vbitclri.b" => "__builtin_lsx_vbitclri_b",
+    "llvm.loongarch.lsx.vbitclri.d" => "__builtin_lsx_vbitclri_d",
+    "llvm.loongarch.lsx.vbitclri.h" => "__builtin_lsx_vbitclri_h",
+    "llvm.loongarch.lsx.vbitclri.w" => "__builtin_lsx_vbitclri_w",
+    "llvm.loongarch.lsx.vbitrev.b" => "__builtin_lsx_vbitrev_b",
+    "llvm.loongarch.lsx.vbitrev.d" => "__builtin_lsx_vbitrev_d",
+    "llvm.loongarch.lsx.vbitrev.h" => "__builtin_lsx_vbitrev_h",
+    "llvm.loongarch.lsx.vbitrev.w" => "__builtin_lsx_vbitrev_w",
+    "llvm.loongarch.lsx.vbitrevi.b" => "__builtin_lsx_vbitrevi_b",
+    "llvm.loongarch.lsx.vbitrevi.d" => "__builtin_lsx_vbitrevi_d",
+    "llvm.loongarch.lsx.vbitrevi.h" => "__builtin_lsx_vbitrevi_h",
+    "llvm.loongarch.lsx.vbitrevi.w" => "__builtin_lsx_vbitrevi_w",
+    "llvm.loongarch.lsx.vbitsel.v" => "__builtin_lsx_vbitsel_v",
+    "llvm.loongarch.lsx.vbitseli.b" => "__builtin_lsx_vbitseli_b",
+    "llvm.loongarch.lsx.vbitset.b" => "__builtin_lsx_vbitset_b",
+    "llvm.loongarch.lsx.vbitset.d" => "__builtin_lsx_vbitset_d",
+    "llvm.loongarch.lsx.vbitset.h" => "__builtin_lsx_vbitset_h",
+    "llvm.loongarch.lsx.vbitset.w" => "__builtin_lsx_vbitset_w",
+    "llvm.loongarch.lsx.vbitseti.b" => "__builtin_lsx_vbitseti_b",
+    "llvm.loongarch.lsx.vbitseti.d" => "__builtin_lsx_vbitseti_d",
+    "llvm.loongarch.lsx.vbitseti.h" => "__builtin_lsx_vbitseti_h",
+    "llvm.loongarch.lsx.vbitseti.w" => "__builtin_lsx_vbitseti_w",
+    "llvm.loongarch.lsx.vbsll.v" => "__builtin_lsx_vbsll_v",
+    "llvm.loongarch.lsx.vbsrl.v" => "__builtin_lsx_vbsrl_v",
+    "llvm.loongarch.lsx.vclo.b" => "__builtin_lsx_vclo_b",
+    "llvm.loongarch.lsx.vclo.d" => "__builtin_lsx_vclo_d",
+    "llvm.loongarch.lsx.vclo.h" => "__builtin_lsx_vclo_h",
+    "llvm.loongarch.lsx.vclo.w" => "__builtin_lsx_vclo_w",
+    "llvm.loongarch.lsx.vclz.b" => "__builtin_lsx_vclz_b",
+    "llvm.loongarch.lsx.vclz.d" => "__builtin_lsx_vclz_d",
+    "llvm.loongarch.lsx.vclz.h" => "__builtin_lsx_vclz_h",
+    "llvm.loongarch.lsx.vclz.w" => "__builtin_lsx_vclz_w",
+    "llvm.loongarch.lsx.vdiv.b" => "__builtin_lsx_vdiv_b",
+    "llvm.loongarch.lsx.vdiv.bu" => "__builtin_lsx_vdiv_bu",
+    "llvm.loongarch.lsx.vdiv.d" => "__builtin_lsx_vdiv_d",
+    "llvm.loongarch.lsx.vdiv.du" => "__builtin_lsx_vdiv_du",
+    "llvm.loongarch.lsx.vdiv.h" => "__builtin_lsx_vdiv_h",
+    "llvm.loongarch.lsx.vdiv.hu" => "__builtin_lsx_vdiv_hu",
+    "llvm.loongarch.lsx.vdiv.w" => "__builtin_lsx_vdiv_w",
+    "llvm.loongarch.lsx.vdiv.wu" => "__builtin_lsx_vdiv_wu",
+    "llvm.loongarch.lsx.vexth.d.w" => "__builtin_lsx_vexth_d_w",
+    "llvm.loongarch.lsx.vexth.du.wu" => "__builtin_lsx_vexth_du_wu",
+    "llvm.loongarch.lsx.vexth.h.b" => "__builtin_lsx_vexth_h_b",
+    "llvm.loongarch.lsx.vexth.hu.bu" => "__builtin_lsx_vexth_hu_bu",
+    "llvm.loongarch.lsx.vexth.q.d" => "__builtin_lsx_vexth_q_d",
+    "llvm.loongarch.lsx.vexth.qu.du" => "__builtin_lsx_vexth_qu_du",
+    "llvm.loongarch.lsx.vexth.w.h" => "__builtin_lsx_vexth_w_h",
+    "llvm.loongarch.lsx.vexth.wu.hu" => "__builtin_lsx_vexth_wu_hu",
+    "llvm.loongarch.lsx.vextl.q.d" => "__builtin_lsx_vextl_q_d",
+    "llvm.loongarch.lsx.vextl.qu.du" => "__builtin_lsx_vextl_qu_du",
+    "llvm.loongarch.lsx.vextrins.b" => "__builtin_lsx_vextrins_b",
+    "llvm.loongarch.lsx.vextrins.d" => "__builtin_lsx_vextrins_d",
+    "llvm.loongarch.lsx.vextrins.h" => "__builtin_lsx_vextrins_h",
+    "llvm.loongarch.lsx.vextrins.w" => "__builtin_lsx_vextrins_w",
+    "llvm.loongarch.lsx.vfadd.d" => "__builtin_lsx_vfadd_d",
+    "llvm.loongarch.lsx.vfadd.s" => "__builtin_lsx_vfadd_s",
+    "llvm.loongarch.lsx.vfclass.d" => "__builtin_lsx_vfclass_d",
+    "llvm.loongarch.lsx.vfclass.s" => "__builtin_lsx_vfclass_s",
+    "llvm.loongarch.lsx.vfcmp.caf.d" => "__builtin_lsx_vfcmp_caf_d",
+    "llvm.loongarch.lsx.vfcmp.caf.s" => "__builtin_lsx_vfcmp_caf_s",
+    "llvm.loongarch.lsx.vfcmp.ceq.d" => "__builtin_lsx_vfcmp_ceq_d",
+    "llvm.loongarch.lsx.vfcmp.ceq.s" => "__builtin_lsx_vfcmp_ceq_s",
+    "llvm.loongarch.lsx.vfcmp.cle.d" => "__builtin_lsx_vfcmp_cle_d",
+    "llvm.loongarch.lsx.vfcmp.cle.s" => "__builtin_lsx_vfcmp_cle_s",
+    "llvm.loongarch.lsx.vfcmp.clt.d" => "__builtin_lsx_vfcmp_clt_d",
+    "llvm.loongarch.lsx.vfcmp.clt.s" => "__builtin_lsx_vfcmp_clt_s",
+    "llvm.loongarch.lsx.vfcmp.cne.d" => "__builtin_lsx_vfcmp_cne_d",
+    "llvm.loongarch.lsx.vfcmp.cne.s" => "__builtin_lsx_vfcmp_cne_s",
+    "llvm.loongarch.lsx.vfcmp.cor.d" => "__builtin_lsx_vfcmp_cor_d",
+    "llvm.loongarch.lsx.vfcmp.cor.s" => "__builtin_lsx_vfcmp_cor_s",
+    "llvm.loongarch.lsx.vfcmp.cueq.d" => "__builtin_lsx_vfcmp_cueq_d",
+    "llvm.loongarch.lsx.vfcmp.cueq.s" => "__builtin_lsx_vfcmp_cueq_s",
+    "llvm.loongarch.lsx.vfcmp.cule.d" => "__builtin_lsx_vfcmp_cule_d",
+    "llvm.loongarch.lsx.vfcmp.cule.s" => "__builtin_lsx_vfcmp_cule_s",
+    "llvm.loongarch.lsx.vfcmp.cult.d" => "__builtin_lsx_vfcmp_cult_d",
+    "llvm.loongarch.lsx.vfcmp.cult.s" => "__builtin_lsx_vfcmp_cult_s",
+    "llvm.loongarch.lsx.vfcmp.cun.d" => "__builtin_lsx_vfcmp_cun_d",
+    "llvm.loongarch.lsx.vfcmp.cun.s" => "__builtin_lsx_vfcmp_cun_s",
+    "llvm.loongarch.lsx.vfcmp.cune.d" => "__builtin_lsx_vfcmp_cune_d",
+    "llvm.loongarch.lsx.vfcmp.cune.s" => "__builtin_lsx_vfcmp_cune_s",
+    "llvm.loongarch.lsx.vfcmp.saf.d" => "__builtin_lsx_vfcmp_saf_d",
+    "llvm.loongarch.lsx.vfcmp.saf.s" => "__builtin_lsx_vfcmp_saf_s",
+    "llvm.loongarch.lsx.vfcmp.seq.d" => "__builtin_lsx_vfcmp_seq_d",
+    "llvm.loongarch.lsx.vfcmp.seq.s" => "__builtin_lsx_vfcmp_seq_s",
+    "llvm.loongarch.lsx.vfcmp.sle.d" => "__builtin_lsx_vfcmp_sle_d",
+    "llvm.loongarch.lsx.vfcmp.sle.s" => "__builtin_lsx_vfcmp_sle_s",
+    "llvm.loongarch.lsx.vfcmp.slt.d" => "__builtin_lsx_vfcmp_slt_d",
+    "llvm.loongarch.lsx.vfcmp.slt.s" => "__builtin_lsx_vfcmp_slt_s",
+    "llvm.loongarch.lsx.vfcmp.sne.d" => "__builtin_lsx_vfcmp_sne_d",
+    "llvm.loongarch.lsx.vfcmp.sne.s" => "__builtin_lsx_vfcmp_sne_s",
+    "llvm.loongarch.lsx.vfcmp.sor.d" => "__builtin_lsx_vfcmp_sor_d",
+    "llvm.loongarch.lsx.vfcmp.sor.s" => "__builtin_lsx_vfcmp_sor_s",
+    "llvm.loongarch.lsx.vfcmp.sueq.d" => "__builtin_lsx_vfcmp_sueq_d",
+    "llvm.loongarch.lsx.vfcmp.sueq.s" => "__builtin_lsx_vfcmp_sueq_s",
+    "llvm.loongarch.lsx.vfcmp.sule.d" => "__builtin_lsx_vfcmp_sule_d",
+    "llvm.loongarch.lsx.vfcmp.sule.s" => "__builtin_lsx_vfcmp_sule_s",
+    "llvm.loongarch.lsx.vfcmp.sult.d" => "__builtin_lsx_vfcmp_sult_d",
+    "llvm.loongarch.lsx.vfcmp.sult.s" => "__builtin_lsx_vfcmp_sult_s",
+    "llvm.loongarch.lsx.vfcmp.sun.d" => "__builtin_lsx_vfcmp_sun_d",
+    "llvm.loongarch.lsx.vfcmp.sun.s" => "__builtin_lsx_vfcmp_sun_s",
+    "llvm.loongarch.lsx.vfcmp.sune.d" => "__builtin_lsx_vfcmp_sune_d",
+    "llvm.loongarch.lsx.vfcmp.sune.s" => "__builtin_lsx_vfcmp_sune_s",
+    "llvm.loongarch.lsx.vfcvt.h.s" => "__builtin_lsx_vfcvt_h_s",
+    "llvm.loongarch.lsx.vfcvt.s.d" => "__builtin_lsx_vfcvt_s_d",
+    "llvm.loongarch.lsx.vfcvth.d.s" => "__builtin_lsx_vfcvth_d_s",
+    "llvm.loongarch.lsx.vfcvth.s.h" => "__builtin_lsx_vfcvth_s_h",
+    "llvm.loongarch.lsx.vfcvtl.d.s" => "__builtin_lsx_vfcvtl_d_s",
+    "llvm.loongarch.lsx.vfcvtl.s.h" => "__builtin_lsx_vfcvtl_s_h",
+    "llvm.loongarch.lsx.vfdiv.d" => "__builtin_lsx_vfdiv_d",
+    "llvm.loongarch.lsx.vfdiv.s" => "__builtin_lsx_vfdiv_s",
+    "llvm.loongarch.lsx.vffint.d.l" => "__builtin_lsx_vffint_d_l",
+    "llvm.loongarch.lsx.vffint.d.lu" => "__builtin_lsx_vffint_d_lu",
+    "llvm.loongarch.lsx.vffint.s.l" => "__builtin_lsx_vffint_s_l",
+    "llvm.loongarch.lsx.vffint.s.w" => "__builtin_lsx_vffint_s_w",
+    "llvm.loongarch.lsx.vffint.s.wu" => "__builtin_lsx_vffint_s_wu",
+    "llvm.loongarch.lsx.vffinth.d.w" => "__builtin_lsx_vffinth_d_w",
+    "llvm.loongarch.lsx.vffintl.d.w" => "__builtin_lsx_vffintl_d_w",
+    "llvm.loongarch.lsx.vflogb.d" => "__builtin_lsx_vflogb_d",
+    "llvm.loongarch.lsx.vflogb.s" => "__builtin_lsx_vflogb_s",
+    "llvm.loongarch.lsx.vfmadd.d" => "__builtin_lsx_vfmadd_d",
+    "llvm.loongarch.lsx.vfmadd.s" => "__builtin_lsx_vfmadd_s",
+    "llvm.loongarch.lsx.vfmax.d" => "__builtin_lsx_vfmax_d",
+    "llvm.loongarch.lsx.vfmax.s" => "__builtin_lsx_vfmax_s",
+    "llvm.loongarch.lsx.vfmaxa.d" => "__builtin_lsx_vfmaxa_d",
+    "llvm.loongarch.lsx.vfmaxa.s" => "__builtin_lsx_vfmaxa_s",
+    "llvm.loongarch.lsx.vfmin.d" => "__builtin_lsx_vfmin_d",
+    "llvm.loongarch.lsx.vfmin.s" => "__builtin_lsx_vfmin_s",
+    "llvm.loongarch.lsx.vfmina.d" => "__builtin_lsx_vfmina_d",
+    "llvm.loongarch.lsx.vfmina.s" => "__builtin_lsx_vfmina_s",
+    "llvm.loongarch.lsx.vfmsub.d" => "__builtin_lsx_vfmsub_d",
+    "llvm.loongarch.lsx.vfmsub.s" => "__builtin_lsx_vfmsub_s",
+    "llvm.loongarch.lsx.vfmul.d" => "__builtin_lsx_vfmul_d",
+    "llvm.loongarch.lsx.vfmul.s" => "__builtin_lsx_vfmul_s",
+    "llvm.loongarch.lsx.vfnmadd.d" => "__builtin_lsx_vfnmadd_d",
+    "llvm.loongarch.lsx.vfnmadd.s" => "__builtin_lsx_vfnmadd_s",
+    "llvm.loongarch.lsx.vfnmsub.d" => "__builtin_lsx_vfnmsub_d",
+    "llvm.loongarch.lsx.vfnmsub.s" => "__builtin_lsx_vfnmsub_s",
+    "llvm.loongarch.lsx.vfrecip.d" => "__builtin_lsx_vfrecip_d",
+    "llvm.loongarch.lsx.vfrecip.s" => "__builtin_lsx_vfrecip_s",
+    "llvm.loongarch.lsx.vfrecipe.d" => "__builtin_lsx_vfrecipe_d",
+    "llvm.loongarch.lsx.vfrecipe.s" => "__builtin_lsx_vfrecipe_s",
+    "llvm.loongarch.lsx.vfrint.d" => "__builtin_lsx_vfrint_d",
+    "llvm.loongarch.lsx.vfrint.s" => "__builtin_lsx_vfrint_s",
+    "llvm.loongarch.lsx.vfrintrm.d" => "__builtin_lsx_vfrintrm_d",
+    "llvm.loongarch.lsx.vfrintrm.s" => "__builtin_lsx_vfrintrm_s",
+    "llvm.loongarch.lsx.vfrintrne.d" => "__builtin_lsx_vfrintrne_d",
+    "llvm.loongarch.lsx.vfrintrne.s" => "__builtin_lsx_vfrintrne_s",
+    "llvm.loongarch.lsx.vfrintrp.d" => "__builtin_lsx_vfrintrp_d",
+    "llvm.loongarch.lsx.vfrintrp.s" => "__builtin_lsx_vfrintrp_s",
+    "llvm.loongarch.lsx.vfrintrz.d" => "__builtin_lsx_vfrintrz_d",
+    "llvm.loongarch.lsx.vfrintrz.s" => "__builtin_lsx_vfrintrz_s",
+    "llvm.loongarch.lsx.vfrsqrt.d" => "__builtin_lsx_vfrsqrt_d",
+    "llvm.loongarch.lsx.vfrsqrt.s" => "__builtin_lsx_vfrsqrt_s",
+    "llvm.loongarch.lsx.vfrsqrte.d" => "__builtin_lsx_vfrsqrte_d",
+    "llvm.loongarch.lsx.vfrsqrte.s" => "__builtin_lsx_vfrsqrte_s",
+    "llvm.loongarch.lsx.vfrstp.b" => "__builtin_lsx_vfrstp_b",
+    "llvm.loongarch.lsx.vfrstp.h" => "__builtin_lsx_vfrstp_h",
+    "llvm.loongarch.lsx.vfrstpi.b" => "__builtin_lsx_vfrstpi_b",
+    "llvm.loongarch.lsx.vfrstpi.h" => "__builtin_lsx_vfrstpi_h",
+    "llvm.loongarch.lsx.vfsqrt.d" => "__builtin_lsx_vfsqrt_d",
+    "llvm.loongarch.lsx.vfsqrt.s" => "__builtin_lsx_vfsqrt_s",
+    "llvm.loongarch.lsx.vfsub.d" => "__builtin_lsx_vfsub_d",
+    "llvm.loongarch.lsx.vfsub.s" => "__builtin_lsx_vfsub_s",
+    "llvm.loongarch.lsx.vftint.l.d" => "__builtin_lsx_vftint_l_d",
+    "llvm.loongarch.lsx.vftint.lu.d" => "__builtin_lsx_vftint_lu_d",
+    "llvm.loongarch.lsx.vftint.w.d" => "__builtin_lsx_vftint_w_d",
+    "llvm.loongarch.lsx.vftint.w.s" => "__builtin_lsx_vftint_w_s",
+    "llvm.loongarch.lsx.vftint.wu.s" => "__builtin_lsx_vftint_wu_s",
+    "llvm.loongarch.lsx.vftinth.l.s" => "__builtin_lsx_vftinth_l_s",
+    "llvm.loongarch.lsx.vftintl.l.s" => "__builtin_lsx_vftintl_l_s",
+    "llvm.loongarch.lsx.vftintrm.l.d" => "__builtin_lsx_vftintrm_l_d",
+    "llvm.loongarch.lsx.vftintrm.w.d" => "__builtin_lsx_vftintrm_w_d",
+    "llvm.loongarch.lsx.vftintrm.w.s" => "__builtin_lsx_vftintrm_w_s",
+    "llvm.loongarch.lsx.vftintrmh.l.s" => "__builtin_lsx_vftintrmh_l_s",
+    "llvm.loongarch.lsx.vftintrml.l.s" => "__builtin_lsx_vftintrml_l_s",
+    "llvm.loongarch.lsx.vftintrne.l.d" => "__builtin_lsx_vftintrne_l_d",
+    "llvm.loongarch.lsx.vftintrne.w.d" => "__builtin_lsx_vftintrne_w_d",
+    "llvm.loongarch.lsx.vftintrne.w.s" => "__builtin_lsx_vftintrne_w_s",
+    "llvm.loongarch.lsx.vftintrneh.l.s" => "__builtin_lsx_vftintrneh_l_s",
+    "llvm.loongarch.lsx.vftintrnel.l.s" => "__builtin_lsx_vftintrnel_l_s",
+    "llvm.loongarch.lsx.vftintrp.l.d" => "__builtin_lsx_vftintrp_l_d",
+    "llvm.loongarch.lsx.vftintrp.w.d" => "__builtin_lsx_vftintrp_w_d",
+    "llvm.loongarch.lsx.vftintrp.w.s" => "__builtin_lsx_vftintrp_w_s",
+    "llvm.loongarch.lsx.vftintrph.l.s" => "__builtin_lsx_vftintrph_l_s",
+    "llvm.loongarch.lsx.vftintrpl.l.s" => "__builtin_lsx_vftintrpl_l_s",
+    "llvm.loongarch.lsx.vftintrz.l.d" => "__builtin_lsx_vftintrz_l_d",
+    "llvm.loongarch.lsx.vftintrz.lu.d" => "__builtin_lsx_vftintrz_lu_d",
+    "llvm.loongarch.lsx.vftintrz.w.d" => "__builtin_lsx_vftintrz_w_d",
+    "llvm.loongarch.lsx.vftintrz.w.s" => "__builtin_lsx_vftintrz_w_s",
+    "llvm.loongarch.lsx.vftintrz.wu.s" => "__builtin_lsx_vftintrz_wu_s",
+    "llvm.loongarch.lsx.vftintrzh.l.s" => "__builtin_lsx_vftintrzh_l_s",
+    "llvm.loongarch.lsx.vftintrzl.l.s" => "__builtin_lsx_vftintrzl_l_s",
+    "llvm.loongarch.lsx.vhaddw.d.w" => "__builtin_lsx_vhaddw_d_w",
+    "llvm.loongarch.lsx.vhaddw.du.wu" => "__builtin_lsx_vhaddw_du_wu",
+    "llvm.loongarch.lsx.vhaddw.h.b" => "__builtin_lsx_vhaddw_h_b",
+    "llvm.loongarch.lsx.vhaddw.hu.bu" => "__builtin_lsx_vhaddw_hu_bu",
+    "llvm.loongarch.lsx.vhaddw.q.d" => "__builtin_lsx_vhaddw_q_d",
+    "llvm.loongarch.lsx.vhaddw.qu.du" => "__builtin_lsx_vhaddw_qu_du",
+    "llvm.loongarch.lsx.vhaddw.w.h" => "__builtin_lsx_vhaddw_w_h",
+    "llvm.loongarch.lsx.vhaddw.wu.hu" => "__builtin_lsx_vhaddw_wu_hu",
+    "llvm.loongarch.lsx.vhsubw.d.w" => "__builtin_lsx_vhsubw_d_w",
+    "llvm.loongarch.lsx.vhsubw.du.wu" => "__builtin_lsx_vhsubw_du_wu",
+    "llvm.loongarch.lsx.vhsubw.h.b" => "__builtin_lsx_vhsubw_h_b",
+    "llvm.loongarch.lsx.vhsubw.hu.bu" => "__builtin_lsx_vhsubw_hu_bu",
+    "llvm.loongarch.lsx.vhsubw.q.d" => "__builtin_lsx_vhsubw_q_d",
+    "llvm.loongarch.lsx.vhsubw.qu.du" => "__builtin_lsx_vhsubw_qu_du",
+    "llvm.loongarch.lsx.vhsubw.w.h" => "__builtin_lsx_vhsubw_w_h",
+    "llvm.loongarch.lsx.vhsubw.wu.hu" => "__builtin_lsx_vhsubw_wu_hu",
+    "llvm.loongarch.lsx.vilvh.b" => "__builtin_lsx_vilvh_b",
+    "llvm.loongarch.lsx.vilvh.d" => "__builtin_lsx_vilvh_d",
+    "llvm.loongarch.lsx.vilvh.h" => "__builtin_lsx_vilvh_h",
+    "llvm.loongarch.lsx.vilvh.w" => "__builtin_lsx_vilvh_w",
+    "llvm.loongarch.lsx.vilvl.b" => "__builtin_lsx_vilvl_b",
+    "llvm.loongarch.lsx.vilvl.d" => "__builtin_lsx_vilvl_d",
+    "llvm.loongarch.lsx.vilvl.h" => "__builtin_lsx_vilvl_h",
+    "llvm.loongarch.lsx.vilvl.w" => "__builtin_lsx_vilvl_w",
+    "llvm.loongarch.lsx.vinsgr2vr.b" => "__builtin_lsx_vinsgr2vr_b",
+    "llvm.loongarch.lsx.vinsgr2vr.d" => "__builtin_lsx_vinsgr2vr_d",
+    "llvm.loongarch.lsx.vinsgr2vr.h" => "__builtin_lsx_vinsgr2vr_h",
+    "llvm.loongarch.lsx.vinsgr2vr.w" => "__builtin_lsx_vinsgr2vr_w",
+    "llvm.loongarch.lsx.vld" => "__builtin_lsx_vld",
+    "llvm.loongarch.lsx.vldi" => "__builtin_lsx_vldi",
+    "llvm.loongarch.lsx.vldrepl.b" => "__builtin_lsx_vldrepl_b",
+    "llvm.loongarch.lsx.vldrepl.d" => "__builtin_lsx_vldrepl_d",
+    "llvm.loongarch.lsx.vldrepl.h" => "__builtin_lsx_vldrepl_h",
+    "llvm.loongarch.lsx.vldrepl.w" => "__builtin_lsx_vldrepl_w",
+    "llvm.loongarch.lsx.vldx" => "__builtin_lsx_vldx",
+    "llvm.loongarch.lsx.vmadd.b" => "__builtin_lsx_vmadd_b",
+    "llvm.loongarch.lsx.vmadd.d" => "__builtin_lsx_vmadd_d",
+    "llvm.loongarch.lsx.vmadd.h" => "__builtin_lsx_vmadd_h",
+    "llvm.loongarch.lsx.vmadd.w" => "__builtin_lsx_vmadd_w",
+    "llvm.loongarch.lsx.vmaddwev.d.w" => "__builtin_lsx_vmaddwev_d_w",
+    "llvm.loongarch.lsx.vmaddwev.d.wu" => "__builtin_lsx_vmaddwev_d_wu",
+    "llvm.loongarch.lsx.vmaddwev.d.wu.w" => "__builtin_lsx_vmaddwev_d_wu_w",
+    "llvm.loongarch.lsx.vmaddwev.h.b" => "__builtin_lsx_vmaddwev_h_b",
+    "llvm.loongarch.lsx.vmaddwev.h.bu" => "__builtin_lsx_vmaddwev_h_bu",
+    "llvm.loongarch.lsx.vmaddwev.h.bu.b" => "__builtin_lsx_vmaddwev_h_bu_b",
+    "llvm.loongarch.lsx.vmaddwev.q.d" => "__builtin_lsx_vmaddwev_q_d",
+    "llvm.loongarch.lsx.vmaddwev.q.du" => "__builtin_lsx_vmaddwev_q_du",
+    "llvm.loongarch.lsx.vmaddwev.q.du.d" => "__builtin_lsx_vmaddwev_q_du_d",
+    "llvm.loongarch.lsx.vmaddwev.w.h" => "__builtin_lsx_vmaddwev_w_h",
+    "llvm.loongarch.lsx.vmaddwev.w.hu" => "__builtin_lsx_vmaddwev_w_hu",
+    "llvm.loongarch.lsx.vmaddwev.w.hu.h" => "__builtin_lsx_vmaddwev_w_hu_h",
+    "llvm.loongarch.lsx.vmaddwod.d.w" => "__builtin_lsx_vmaddwod_d_w",
+    "llvm.loongarch.lsx.vmaddwod.d.wu" => "__builtin_lsx_vmaddwod_d_wu",
+    "llvm.loongarch.lsx.vmaddwod.d.wu.w" => "__builtin_lsx_vmaddwod_d_wu_w",
+    "llvm.loongarch.lsx.vmaddwod.h.b" => "__builtin_lsx_vmaddwod_h_b",
+    "llvm.loongarch.lsx.vmaddwod.h.bu" => "__builtin_lsx_vmaddwod_h_bu",
+    "llvm.loongarch.lsx.vmaddwod.h.bu.b" => "__builtin_lsx_vmaddwod_h_bu_b",
+    "llvm.loongarch.lsx.vmaddwod.q.d" => "__builtin_lsx_vmaddwod_q_d",
+    "llvm.loongarch.lsx.vmaddwod.q.du" => "__builtin_lsx_vmaddwod_q_du",
+    "llvm.loongarch.lsx.vmaddwod.q.du.d" => "__builtin_lsx_vmaddwod_q_du_d",
+    "llvm.loongarch.lsx.vmaddwod.w.h" => "__builtin_lsx_vmaddwod_w_h",
+    "llvm.loongarch.lsx.vmaddwod.w.hu" => "__builtin_lsx_vmaddwod_w_hu",
+    "llvm.loongarch.lsx.vmaddwod.w.hu.h" => "__builtin_lsx_vmaddwod_w_hu_h",
+    "llvm.loongarch.lsx.vmax.b" => "__builtin_lsx_vmax_b",
+    "llvm.loongarch.lsx.vmax.bu" => "__builtin_lsx_vmax_bu",
+    "llvm.loongarch.lsx.vmax.d" => "__builtin_lsx_vmax_d",
+    "llvm.loongarch.lsx.vmax.du" => "__builtin_lsx_vmax_du",
+    "llvm.loongarch.lsx.vmax.h" => "__builtin_lsx_vmax_h",
+    "llvm.loongarch.lsx.vmax.hu" => "__builtin_lsx_vmax_hu",
+    "llvm.loongarch.lsx.vmax.w" => "__builtin_lsx_vmax_w",
+    "llvm.loongarch.lsx.vmax.wu" => "__builtin_lsx_vmax_wu",
+    "llvm.loongarch.lsx.vmaxi.b" => "__builtin_lsx_vmaxi_b",
+    "llvm.loongarch.lsx.vmaxi.bu" => "__builtin_lsx_vmaxi_bu",
+    "llvm.loongarch.lsx.vmaxi.d" => "__builtin_lsx_vmaxi_d",
+    "llvm.loongarch.lsx.vmaxi.du" => "__builtin_lsx_vmaxi_du",
+    "llvm.loongarch.lsx.vmaxi.h" => "__builtin_lsx_vmaxi_h",
+    "llvm.loongarch.lsx.vmaxi.hu" => "__builtin_lsx_vmaxi_hu",
+    "llvm.loongarch.lsx.vmaxi.w" => "__builtin_lsx_vmaxi_w",
+    "llvm.loongarch.lsx.vmaxi.wu" => "__builtin_lsx_vmaxi_wu",
+    "llvm.loongarch.lsx.vmin.b" => "__builtin_lsx_vmin_b",
+    "llvm.loongarch.lsx.vmin.bu" => "__builtin_lsx_vmin_bu",
+    "llvm.loongarch.lsx.vmin.d" => "__builtin_lsx_vmin_d",
+    "llvm.loongarch.lsx.vmin.du" => "__builtin_lsx_vmin_du",
+    "llvm.loongarch.lsx.vmin.h" => "__builtin_lsx_vmin_h",
+    "llvm.loongarch.lsx.vmin.hu" => "__builtin_lsx_vmin_hu",
+    "llvm.loongarch.lsx.vmin.w" => "__builtin_lsx_vmin_w",
+    "llvm.loongarch.lsx.vmin.wu" => "__builtin_lsx_vmin_wu",
+    "llvm.loongarch.lsx.vmini.b" => "__builtin_lsx_vmini_b",
+    "llvm.loongarch.lsx.vmini.bu" => "__builtin_lsx_vmini_bu",
+    "llvm.loongarch.lsx.vmini.d" => "__builtin_lsx_vmini_d",
+    "llvm.loongarch.lsx.vmini.du" => "__builtin_lsx_vmini_du",
+    "llvm.loongarch.lsx.vmini.h" => "__builtin_lsx_vmini_h",
+    "llvm.loongarch.lsx.vmini.hu" => "__builtin_lsx_vmini_hu",
+    "llvm.loongarch.lsx.vmini.w" => "__builtin_lsx_vmini_w",
+    "llvm.loongarch.lsx.vmini.wu" => "__builtin_lsx_vmini_wu",
+    "llvm.loongarch.lsx.vmod.b" => "__builtin_lsx_vmod_b",
+    "llvm.loongarch.lsx.vmod.bu" => "__builtin_lsx_vmod_bu",
+    "llvm.loongarch.lsx.vmod.d" => "__builtin_lsx_vmod_d",
+    "llvm.loongarch.lsx.vmod.du" => "__builtin_lsx_vmod_du",
+    "llvm.loongarch.lsx.vmod.h" => "__builtin_lsx_vmod_h",
+    "llvm.loongarch.lsx.vmod.hu" => "__builtin_lsx_vmod_hu",
+    "llvm.loongarch.lsx.vmod.w" => "__builtin_lsx_vmod_w",
+    "llvm.loongarch.lsx.vmod.wu" => "__builtin_lsx_vmod_wu",
+    "llvm.loongarch.lsx.vmskgez.b" => "__builtin_lsx_vmskgez_b",
+    "llvm.loongarch.lsx.vmskltz.b" => "__builtin_lsx_vmskltz_b",
+    "llvm.loongarch.lsx.vmskltz.d" => "__builtin_lsx_vmskltz_d",
+    "llvm.loongarch.lsx.vmskltz.h" => "__builtin_lsx_vmskltz_h",
+    "llvm.loongarch.lsx.vmskltz.w" => "__builtin_lsx_vmskltz_w",
+    "llvm.loongarch.lsx.vmsknz.b" => "__builtin_lsx_vmsknz_b",
+    "llvm.loongarch.lsx.vmsub.b" => "__builtin_lsx_vmsub_b",
+    "llvm.loongarch.lsx.vmsub.d" => "__builtin_lsx_vmsub_d",
+    "llvm.loongarch.lsx.vmsub.h" => "__builtin_lsx_vmsub_h",
+    "llvm.loongarch.lsx.vmsub.w" => "__builtin_lsx_vmsub_w",
+    "llvm.loongarch.lsx.vmuh.b" => "__builtin_lsx_vmuh_b",
+    "llvm.loongarch.lsx.vmuh.bu" => "__builtin_lsx_vmuh_bu",
+    "llvm.loongarch.lsx.vmuh.d" => "__builtin_lsx_vmuh_d",
+    "llvm.loongarch.lsx.vmuh.du" => "__builtin_lsx_vmuh_du",
+    "llvm.loongarch.lsx.vmuh.h" => "__builtin_lsx_vmuh_h",
+    "llvm.loongarch.lsx.vmuh.hu" => "__builtin_lsx_vmuh_hu",
+    "llvm.loongarch.lsx.vmuh.w" => "__builtin_lsx_vmuh_w",
+    "llvm.loongarch.lsx.vmuh.wu" => "__builtin_lsx_vmuh_wu",
+    "llvm.loongarch.lsx.vmul.b" => "__builtin_lsx_vmul_b",
+    "llvm.loongarch.lsx.vmul.d" => "__builtin_lsx_vmul_d",
+    "llvm.loongarch.lsx.vmul.h" => "__builtin_lsx_vmul_h",
+    "llvm.loongarch.lsx.vmul.w" => "__builtin_lsx_vmul_w",
+    "llvm.loongarch.lsx.vmulwev.d.w" => "__builtin_lsx_vmulwev_d_w",
+    "llvm.loongarch.lsx.vmulwev.d.wu" => "__builtin_lsx_vmulwev_d_wu",
+    "llvm.loongarch.lsx.vmulwev.d.wu.w" => "__builtin_lsx_vmulwev_d_wu_w",
+    "llvm.loongarch.lsx.vmulwev.h.b" => "__builtin_lsx_vmulwev_h_b",
+    "llvm.loongarch.lsx.vmulwev.h.bu" => "__builtin_lsx_vmulwev_h_bu",
+    "llvm.loongarch.lsx.vmulwev.h.bu.b" => "__builtin_lsx_vmulwev_h_bu_b",
+    "llvm.loongarch.lsx.vmulwev.q.d" => "__builtin_lsx_vmulwev_q_d",
+    "llvm.loongarch.lsx.vmulwev.q.du" => "__builtin_lsx_vmulwev_q_du",
+    "llvm.loongarch.lsx.vmulwev.q.du.d" => "__builtin_lsx_vmulwev_q_du_d",
+    "llvm.loongarch.lsx.vmulwev.w.h" => "__builtin_lsx_vmulwev_w_h",
+    "llvm.loongarch.lsx.vmulwev.w.hu" => "__builtin_lsx_vmulwev_w_hu",
+    "llvm.loongarch.lsx.vmulwev.w.hu.h" => "__builtin_lsx_vmulwev_w_hu_h",
+    "llvm.loongarch.lsx.vmulwod.d.w" => "__builtin_lsx_vmulwod_d_w",
+    "llvm.loongarch.lsx.vmulwod.d.wu" => "__builtin_lsx_vmulwod_d_wu",
+    "llvm.loongarch.lsx.vmulwod.d.wu.w" => "__builtin_lsx_vmulwod_d_wu_w",
+    "llvm.loongarch.lsx.vmulwod.h.b" => "__builtin_lsx_vmulwod_h_b",
+    "llvm.loongarch.lsx.vmulwod.h.bu" => "__builtin_lsx_vmulwod_h_bu",
+    "llvm.loongarch.lsx.vmulwod.h.bu.b" => "__builtin_lsx_vmulwod_h_bu_b",
+    "llvm.loongarch.lsx.vmulwod.q.d" => "__builtin_lsx_vmulwod_q_d",
+    "llvm.loongarch.lsx.vmulwod.q.du" => "__builtin_lsx_vmulwod_q_du",
+    "llvm.loongarch.lsx.vmulwod.q.du.d" => "__builtin_lsx_vmulwod_q_du_d",
+    "llvm.loongarch.lsx.vmulwod.w.h" => "__builtin_lsx_vmulwod_w_h",
+    "llvm.loongarch.lsx.vmulwod.w.hu" => "__builtin_lsx_vmulwod_w_hu",
+    "llvm.loongarch.lsx.vmulwod.w.hu.h" => "__builtin_lsx_vmulwod_w_hu_h",
+    "llvm.loongarch.lsx.vneg.b" => "__builtin_lsx_vneg_b",
+    "llvm.loongarch.lsx.vneg.d" => "__builtin_lsx_vneg_d",
+    "llvm.loongarch.lsx.vneg.h" => "__builtin_lsx_vneg_h",
+    "llvm.loongarch.lsx.vneg.w" => "__builtin_lsx_vneg_w",
+    "llvm.loongarch.lsx.vnor.v" => "__builtin_lsx_vnor_v",
+    "llvm.loongarch.lsx.vnori.b" => "__builtin_lsx_vnori_b",
+    "llvm.loongarch.lsx.vor.v" => "__builtin_lsx_vor_v",
+    "llvm.loongarch.lsx.vori.b" => "__builtin_lsx_vori_b",
+    "llvm.loongarch.lsx.vorn.v" => "__builtin_lsx_vorn_v",
+    "llvm.loongarch.lsx.vpackev.b" => "__builtin_lsx_vpackev_b",
+    "llvm.loongarch.lsx.vpackev.d" => "__builtin_lsx_vpackev_d",
+    "llvm.loongarch.lsx.vpackev.h" => "__builtin_lsx_vpackev_h",
+    "llvm.loongarch.lsx.vpackev.w" => "__builtin_lsx_vpackev_w",
+    "llvm.loongarch.lsx.vpackod.b" => "__builtin_lsx_vpackod_b",
+    "llvm.loongarch.lsx.vpackod.d" => "__builtin_lsx_vpackod_d",
+    "llvm.loongarch.lsx.vpackod.h" => "__builtin_lsx_vpackod_h",
+    "llvm.loongarch.lsx.vpackod.w" => "__builtin_lsx_vpackod_w",
+    "llvm.loongarch.lsx.vpcnt.b" => "__builtin_lsx_vpcnt_b",
+    "llvm.loongarch.lsx.vpcnt.d" => "__builtin_lsx_vpcnt_d",
+    "llvm.loongarch.lsx.vpcnt.h" => "__builtin_lsx_vpcnt_h",
+    "llvm.loongarch.lsx.vpcnt.w" => "__builtin_lsx_vpcnt_w",
+    "llvm.loongarch.lsx.vpermi.w" => "__builtin_lsx_vpermi_w",
+    "llvm.loongarch.lsx.vpickev.b" => "__builtin_lsx_vpickev_b",
+    "llvm.loongarch.lsx.vpickev.d" => "__builtin_lsx_vpickev_d",
+    "llvm.loongarch.lsx.vpickev.h" => "__builtin_lsx_vpickev_h",
+    "llvm.loongarch.lsx.vpickev.w" => "__builtin_lsx_vpickev_w",
+    "llvm.loongarch.lsx.vpickod.b" => "__builtin_lsx_vpickod_b",
+    "llvm.loongarch.lsx.vpickod.d" => "__builtin_lsx_vpickod_d",
+    "llvm.loongarch.lsx.vpickod.h" => "__builtin_lsx_vpickod_h",
+    "llvm.loongarch.lsx.vpickod.w" => "__builtin_lsx_vpickod_w",
+    "llvm.loongarch.lsx.vpickve2gr.b" => "__builtin_lsx_vpickve2gr_b",
+    "llvm.loongarch.lsx.vpickve2gr.bu" => "__builtin_lsx_vpickve2gr_bu",
+    "llvm.loongarch.lsx.vpickve2gr.d" => "__builtin_lsx_vpickve2gr_d",
+    "llvm.loongarch.lsx.vpickve2gr.du" => "__builtin_lsx_vpickve2gr_du",
+    "llvm.loongarch.lsx.vpickve2gr.h" => "__builtin_lsx_vpickve2gr_h",
+    "llvm.loongarch.lsx.vpickve2gr.hu" => "__builtin_lsx_vpickve2gr_hu",
+    "llvm.loongarch.lsx.vpickve2gr.w" => "__builtin_lsx_vpickve2gr_w",
+    "llvm.loongarch.lsx.vpickve2gr.wu" => "__builtin_lsx_vpickve2gr_wu",
+    "llvm.loongarch.lsx.vreplgr2vr.b" => "__builtin_lsx_vreplgr2vr_b",
+    "llvm.loongarch.lsx.vreplgr2vr.d" => "__builtin_lsx_vreplgr2vr_d",
+    "llvm.loongarch.lsx.vreplgr2vr.h" => "__builtin_lsx_vreplgr2vr_h",
+    "llvm.loongarch.lsx.vreplgr2vr.w" => "__builtin_lsx_vreplgr2vr_w",
+    "llvm.loongarch.lsx.vrepli.b" => "__builtin_lsx_vrepli_b",
+    "llvm.loongarch.lsx.vrepli.d" => "__builtin_lsx_vrepli_d",
+    "llvm.loongarch.lsx.vrepli.h" => "__builtin_lsx_vrepli_h",
+    "llvm.loongarch.lsx.vrepli.w" => "__builtin_lsx_vrepli_w",
+    "llvm.loongarch.lsx.vreplve.b" => "__builtin_lsx_vreplve_b",
+    "llvm.loongarch.lsx.vreplve.d" => "__builtin_lsx_vreplve_d",
+    "llvm.loongarch.lsx.vreplve.h" => "__builtin_lsx_vreplve_h",
+    "llvm.loongarch.lsx.vreplve.w" => "__builtin_lsx_vreplve_w",
+    "llvm.loongarch.lsx.vreplvei.b" => "__builtin_lsx_vreplvei_b",
+    "llvm.loongarch.lsx.vreplvei.d" => "__builtin_lsx_vreplvei_d",
+    "llvm.loongarch.lsx.vreplvei.h" => "__builtin_lsx_vreplvei_h",
+    "llvm.loongarch.lsx.vreplvei.w" => "__builtin_lsx_vreplvei_w",
+    "llvm.loongarch.lsx.vrotr.b" => "__builtin_lsx_vrotr_b",
+    "llvm.loongarch.lsx.vrotr.d" => "__builtin_lsx_vrotr_d",
+    "llvm.loongarch.lsx.vrotr.h" => "__builtin_lsx_vrotr_h",
+    "llvm.loongarch.lsx.vrotr.w" => "__builtin_lsx_vrotr_w",
+    "llvm.loongarch.lsx.vrotri.b" => "__builtin_lsx_vrotri_b",
+    "llvm.loongarch.lsx.vrotri.d" => "__builtin_lsx_vrotri_d",
+    "llvm.loongarch.lsx.vrotri.h" => "__builtin_lsx_vrotri_h",
+    "llvm.loongarch.lsx.vrotri.w" => "__builtin_lsx_vrotri_w",
+    "llvm.loongarch.lsx.vsadd.b" => "__builtin_lsx_vsadd_b",
+    "llvm.loongarch.lsx.vsadd.bu" => "__builtin_lsx_vsadd_bu",
+    "llvm.loongarch.lsx.vsadd.d" => "__builtin_lsx_vsadd_d",
+    "llvm.loongarch.lsx.vsadd.du" => "__builtin_lsx_vsadd_du",
+    "llvm.loongarch.lsx.vsadd.h" => "__builtin_lsx_vsadd_h",
+    "llvm.loongarch.lsx.vsadd.hu" => "__builtin_lsx_vsadd_hu",
+    "llvm.loongarch.lsx.vsadd.w" => "__builtin_lsx_vsadd_w",
+    "llvm.loongarch.lsx.vsadd.wu" => "__builtin_lsx_vsadd_wu",
+    "llvm.loongarch.lsx.vsat.b" => "__builtin_lsx_vsat_b",
+    "llvm.loongarch.lsx.vsat.bu" => "__builtin_lsx_vsat_bu",
+    "llvm.loongarch.lsx.vsat.d" => "__builtin_lsx_vsat_d",
+    "llvm.loongarch.lsx.vsat.du" => "__builtin_lsx_vsat_du",
+    "llvm.loongarch.lsx.vsat.h" => "__builtin_lsx_vsat_h",
+    "llvm.loongarch.lsx.vsat.hu" => "__builtin_lsx_vsat_hu",
+    "llvm.loongarch.lsx.vsat.w" => "__builtin_lsx_vsat_w",
+    "llvm.loongarch.lsx.vsat.wu" => "__builtin_lsx_vsat_wu",
+    "llvm.loongarch.lsx.vseq.b" => "__builtin_lsx_vseq_b",
+    "llvm.loongarch.lsx.vseq.d" => "__builtin_lsx_vseq_d",
+    "llvm.loongarch.lsx.vseq.h" => "__builtin_lsx_vseq_h",
+    "llvm.loongarch.lsx.vseq.w" => "__builtin_lsx_vseq_w",
+    "llvm.loongarch.lsx.vseqi.b" => "__builtin_lsx_vseqi_b",
+    "llvm.loongarch.lsx.vseqi.d" => "__builtin_lsx_vseqi_d",
+    "llvm.loongarch.lsx.vseqi.h" => "__builtin_lsx_vseqi_h",
+    "llvm.loongarch.lsx.vseqi.w" => "__builtin_lsx_vseqi_w",
+    "llvm.loongarch.lsx.vshuf.b" => "__builtin_lsx_vshuf_b",
+    "llvm.loongarch.lsx.vshuf.d" => "__builtin_lsx_vshuf_d",
+    "llvm.loongarch.lsx.vshuf.h" => "__builtin_lsx_vshuf_h",
+    "llvm.loongarch.lsx.vshuf.w" => "__builtin_lsx_vshuf_w",
+    "llvm.loongarch.lsx.vshuf4i.b" => "__builtin_lsx_vshuf4i_b",
+    "llvm.loongarch.lsx.vshuf4i.d" => "__builtin_lsx_vshuf4i_d",
+    "llvm.loongarch.lsx.vshuf4i.h" => "__builtin_lsx_vshuf4i_h",
+    "llvm.loongarch.lsx.vshuf4i.w" => "__builtin_lsx_vshuf4i_w",
+    "llvm.loongarch.lsx.vsigncov.b" => "__builtin_lsx_vsigncov_b",
+    "llvm.loongarch.lsx.vsigncov.d" => "__builtin_lsx_vsigncov_d",
+    "llvm.loongarch.lsx.vsigncov.h" => "__builtin_lsx_vsigncov_h",
+    "llvm.loongarch.lsx.vsigncov.w" => "__builtin_lsx_vsigncov_w",
+    "llvm.loongarch.lsx.vsle.b" => "__builtin_lsx_vsle_b",
+    "llvm.loongarch.lsx.vsle.bu" => "__builtin_lsx_vsle_bu",
+    "llvm.loongarch.lsx.vsle.d" => "__builtin_lsx_vsle_d",
+    "llvm.loongarch.lsx.vsle.du" => "__builtin_lsx_vsle_du",
+    "llvm.loongarch.lsx.vsle.h" => "__builtin_lsx_vsle_h",
+    "llvm.loongarch.lsx.vsle.hu" => "__builtin_lsx_vsle_hu",
+    "llvm.loongarch.lsx.vsle.w" => "__builtin_lsx_vsle_w",
+    "llvm.loongarch.lsx.vsle.wu" => "__builtin_lsx_vsle_wu",
+    "llvm.loongarch.lsx.vslei.b" => "__builtin_lsx_vslei_b",
+    "llvm.loongarch.lsx.vslei.bu" => "__builtin_lsx_vslei_bu",
+    "llvm.loongarch.lsx.vslei.d" => "__builtin_lsx_vslei_d",
+    "llvm.loongarch.lsx.vslei.du" => "__builtin_lsx_vslei_du",
+    "llvm.loongarch.lsx.vslei.h" => "__builtin_lsx_vslei_h",
+    "llvm.loongarch.lsx.vslei.hu" => "__builtin_lsx_vslei_hu",
+    "llvm.loongarch.lsx.vslei.w" => "__builtin_lsx_vslei_w",
+    "llvm.loongarch.lsx.vslei.wu" => "__builtin_lsx_vslei_wu",
+    "llvm.loongarch.lsx.vsll.b" => "__builtin_lsx_vsll_b",
+    "llvm.loongarch.lsx.vsll.d" => "__builtin_lsx_vsll_d",
+    "llvm.loongarch.lsx.vsll.h" => "__builtin_lsx_vsll_h",
+    "llvm.loongarch.lsx.vsll.w" => "__builtin_lsx_vsll_w",
+    "llvm.loongarch.lsx.vslli.b" => "__builtin_lsx_vslli_b",
+    "llvm.loongarch.lsx.vslli.d" => "__builtin_lsx_vslli_d",
+    "llvm.loongarch.lsx.vslli.h" => "__builtin_lsx_vslli_h",
+    "llvm.loongarch.lsx.vslli.w" => "__builtin_lsx_vslli_w",
+    "llvm.loongarch.lsx.vsllwil.d.w" => "__builtin_lsx_vsllwil_d_w",
+    "llvm.loongarch.lsx.vsllwil.du.wu" => "__builtin_lsx_vsllwil_du_wu",
+    "llvm.loongarch.lsx.vsllwil.h.b" => "__builtin_lsx_vsllwil_h_b",
+    "llvm.loongarch.lsx.vsllwil.hu.bu" => "__builtin_lsx_vsllwil_hu_bu",
+    "llvm.loongarch.lsx.vsllwil.w.h" => "__builtin_lsx_vsllwil_w_h",
+    "llvm.loongarch.lsx.vsllwil.wu.hu" => "__builtin_lsx_vsllwil_wu_hu",
+    "llvm.loongarch.lsx.vslt.b" => "__builtin_lsx_vslt_b",
+    "llvm.loongarch.lsx.vslt.bu" => "__builtin_lsx_vslt_bu",
+    "llvm.loongarch.lsx.vslt.d" => "__builtin_lsx_vslt_d",
+    "llvm.loongarch.lsx.vslt.du" => "__builtin_lsx_vslt_du",
+    "llvm.loongarch.lsx.vslt.h" => "__builtin_lsx_vslt_h",
+    "llvm.loongarch.lsx.vslt.hu" => "__builtin_lsx_vslt_hu",
+    "llvm.loongarch.lsx.vslt.w" => "__builtin_lsx_vslt_w",
+    "llvm.loongarch.lsx.vslt.wu" => "__builtin_lsx_vslt_wu",
+    "llvm.loongarch.lsx.vslti.b" => "__builtin_lsx_vslti_b",
+    "llvm.loongarch.lsx.vslti.bu" => "__builtin_lsx_vslti_bu",
+    "llvm.loongarch.lsx.vslti.d" => "__builtin_lsx_vslti_d",
+    "llvm.loongarch.lsx.vslti.du" => "__builtin_lsx_vslti_du",
+    "llvm.loongarch.lsx.vslti.h" => "__builtin_lsx_vslti_h",
+    "llvm.loongarch.lsx.vslti.hu" => "__builtin_lsx_vslti_hu",
+    "llvm.loongarch.lsx.vslti.w" => "__builtin_lsx_vslti_w",
+    "llvm.loongarch.lsx.vslti.wu" => "__builtin_lsx_vslti_wu",
+    "llvm.loongarch.lsx.vsra.b" => "__builtin_lsx_vsra_b",
+    "llvm.loongarch.lsx.vsra.d" => "__builtin_lsx_vsra_d",
+    "llvm.loongarch.lsx.vsra.h" => "__builtin_lsx_vsra_h",
+    "llvm.loongarch.lsx.vsra.w" => "__builtin_lsx_vsra_w",
+    "llvm.loongarch.lsx.vsrai.b" => "__builtin_lsx_vsrai_b",
+    "llvm.loongarch.lsx.vsrai.d" => "__builtin_lsx_vsrai_d",
+    "llvm.loongarch.lsx.vsrai.h" => "__builtin_lsx_vsrai_h",
+    "llvm.loongarch.lsx.vsrai.w" => "__builtin_lsx_vsrai_w",
+    "llvm.loongarch.lsx.vsran.b.h" => "__builtin_lsx_vsran_b_h",
+    "llvm.loongarch.lsx.vsran.h.w" => "__builtin_lsx_vsran_h_w",
+    "llvm.loongarch.lsx.vsran.w.d" => "__builtin_lsx_vsran_w_d",
+    "llvm.loongarch.lsx.vsrani.b.h" => "__builtin_lsx_vsrani_b_h",
+    "llvm.loongarch.lsx.vsrani.d.q" => "__builtin_lsx_vsrani_d_q",
+    "llvm.loongarch.lsx.vsrani.h.w" => "__builtin_lsx_vsrani_h_w",
+    "llvm.loongarch.lsx.vsrani.w.d" => "__builtin_lsx_vsrani_w_d",
+    "llvm.loongarch.lsx.vsrar.b" => "__builtin_lsx_vsrar_b",
+    "llvm.loongarch.lsx.vsrar.d" => "__builtin_lsx_vsrar_d",
+    "llvm.loongarch.lsx.vsrar.h" => "__builtin_lsx_vsrar_h",
+    "llvm.loongarch.lsx.vsrar.w" => "__builtin_lsx_vsrar_w",
+    "llvm.loongarch.lsx.vsrari.b" => "__builtin_lsx_vsrari_b",
+    "llvm.loongarch.lsx.vsrari.d" => "__builtin_lsx_vsrari_d",
+    "llvm.loongarch.lsx.vsrari.h" => "__builtin_lsx_vsrari_h",
+    "llvm.loongarch.lsx.vsrari.w" => "__builtin_lsx_vsrari_w",
+    "llvm.loongarch.lsx.vsrarn.b.h" => "__builtin_lsx_vsrarn_b_h",
+    "llvm.loongarch.lsx.vsrarn.h.w" => "__builtin_lsx_vsrarn_h_w",
+    "llvm.loongarch.lsx.vsrarn.w.d" => "__builtin_lsx_vsrarn_w_d",
+    "llvm.loongarch.lsx.vsrarni.b.h" => "__builtin_lsx_vsrarni_b_h",
+    "llvm.loongarch.lsx.vsrarni.d.q" => "__builtin_lsx_vsrarni_d_q",
+    "llvm.loongarch.lsx.vsrarni.h.w" => "__builtin_lsx_vsrarni_h_w",
+    "llvm.loongarch.lsx.vsrarni.w.d" => "__builtin_lsx_vsrarni_w_d",
+    "llvm.loongarch.lsx.vsrl.b" => "__builtin_lsx_vsrl_b",
+    "llvm.loongarch.lsx.vsrl.d" => "__builtin_lsx_vsrl_d",
+    "llvm.loongarch.lsx.vsrl.h" => "__builtin_lsx_vsrl_h",
+    "llvm.loongarch.lsx.vsrl.w" => "__builtin_lsx_vsrl_w",
+    "llvm.loongarch.lsx.vsrli.b" => "__builtin_lsx_vsrli_b",
+    "llvm.loongarch.lsx.vsrli.d" => "__builtin_lsx_vsrli_d",
+    "llvm.loongarch.lsx.vsrli.h" => "__builtin_lsx_vsrli_h",
+    "llvm.loongarch.lsx.vsrli.w" => "__builtin_lsx_vsrli_w",
+    "llvm.loongarch.lsx.vsrln.b.h" => "__builtin_lsx_vsrln_b_h",
+    "llvm.loongarch.lsx.vsrln.h.w" => "__builtin_lsx_vsrln_h_w",
+    "llvm.loongarch.lsx.vsrln.w.d" => "__builtin_lsx_vsrln_w_d",
+    "llvm.loongarch.lsx.vsrlni.b.h" => "__builtin_lsx_vsrlni_b_h",
+    "llvm.loongarch.lsx.vsrlni.d.q" => "__builtin_lsx_vsrlni_d_q",
+    "llvm.loongarch.lsx.vsrlni.h.w" => "__builtin_lsx_vsrlni_h_w",
+    "llvm.loongarch.lsx.vsrlni.w.d" => "__builtin_lsx_vsrlni_w_d",
+    "llvm.loongarch.lsx.vsrlr.b" => "__builtin_lsx_vsrlr_b",
+    "llvm.loongarch.lsx.vsrlr.d" => "__builtin_lsx_vsrlr_d",
+    "llvm.loongarch.lsx.vsrlr.h" => "__builtin_lsx_vsrlr_h",
+    "llvm.loongarch.lsx.vsrlr.w" => "__builtin_lsx_vsrlr_w",
+    "llvm.loongarch.lsx.vsrlri.b" => "__builtin_lsx_vsrlri_b",
+    "llvm.loongarch.lsx.vsrlri.d" => "__builtin_lsx_vsrlri_d",
+    "llvm.loongarch.lsx.vsrlri.h" => "__builtin_lsx_vsrlri_h",
+    "llvm.loongarch.lsx.vsrlri.w" => "__builtin_lsx_vsrlri_w",
+    "llvm.loongarch.lsx.vsrlrn.b.h" => "__builtin_lsx_vsrlrn_b_h",
+    "llvm.loongarch.lsx.vsrlrn.h.w" => "__builtin_lsx_vsrlrn_h_w",
+    "llvm.loongarch.lsx.vsrlrn.w.d" => "__builtin_lsx_vsrlrn_w_d",
+    "llvm.loongarch.lsx.vsrlrni.b.h" => "__builtin_lsx_vsrlrni_b_h",
+    "llvm.loongarch.lsx.vsrlrni.d.q" => "__builtin_lsx_vsrlrni_d_q",
+    "llvm.loongarch.lsx.vsrlrni.h.w" => "__builtin_lsx_vsrlrni_h_w",
+    "llvm.loongarch.lsx.vsrlrni.w.d" => "__builtin_lsx_vsrlrni_w_d",
+    "llvm.loongarch.lsx.vssran.b.h" => "__builtin_lsx_vssran_b_h",
+    "llvm.loongarch.lsx.vssran.bu.h" => "__builtin_lsx_vssran_bu_h",
+    "llvm.loongarch.lsx.vssran.h.w" => "__builtin_lsx_vssran_h_w",
+    "llvm.loongarch.lsx.vssran.hu.w" => "__builtin_lsx_vssran_hu_w",
+    "llvm.loongarch.lsx.vssran.w.d" => "__builtin_lsx_vssran_w_d",
+    "llvm.loongarch.lsx.vssran.wu.d" => "__builtin_lsx_vssran_wu_d",
+    "llvm.loongarch.lsx.vssrani.b.h" => "__builtin_lsx_vssrani_b_h",
+    "llvm.loongarch.lsx.vssrani.bu.h" => "__builtin_lsx_vssrani_bu_h",
+    "llvm.loongarch.lsx.vssrani.d.q" => "__builtin_lsx_vssrani_d_q",
+    "llvm.loongarch.lsx.vssrani.du.q" => "__builtin_lsx_vssrani_du_q",
+    "llvm.loongarch.lsx.vssrani.h.w" => "__builtin_lsx_vssrani_h_w",
+    "llvm.loongarch.lsx.vssrani.hu.w" => "__builtin_lsx_vssrani_hu_w",
+    "llvm.loongarch.lsx.vssrani.w.d" => "__builtin_lsx_vssrani_w_d",
+    "llvm.loongarch.lsx.vssrani.wu.d" => "__builtin_lsx_vssrani_wu_d",
+    "llvm.loongarch.lsx.vssrarn.b.h" => "__builtin_lsx_vssrarn_b_h",
+    "llvm.loongarch.lsx.vssrarn.bu.h" => "__builtin_lsx_vssrarn_bu_h",
+    "llvm.loongarch.lsx.vssrarn.h.w" => "__builtin_lsx_vssrarn_h_w",
+    "llvm.loongarch.lsx.vssrarn.hu.w" => "__builtin_lsx_vssrarn_hu_w",
+    "llvm.loongarch.lsx.vssrarn.w.d" => "__builtin_lsx_vssrarn_w_d",
+    "llvm.loongarch.lsx.vssrarn.wu.d" => "__builtin_lsx_vssrarn_wu_d",
+    "llvm.loongarch.lsx.vssrarni.b.h" => "__builtin_lsx_vssrarni_b_h",
+    "llvm.loongarch.lsx.vssrarni.bu.h" => "__builtin_lsx_vssrarni_bu_h",
+    "llvm.loongarch.lsx.vssrarni.d.q" => "__builtin_lsx_vssrarni_d_q",
+    "llvm.loongarch.lsx.vssrarni.du.q" => "__builtin_lsx_vssrarni_du_q",
+    "llvm.loongarch.lsx.vssrarni.h.w" => "__builtin_lsx_vssrarni_h_w",
+    "llvm.loongarch.lsx.vssrarni.hu.w" => "__builtin_lsx_vssrarni_hu_w",
+    "llvm.loongarch.lsx.vssrarni.w.d" => "__builtin_lsx_vssrarni_w_d",
+    "llvm.loongarch.lsx.vssrarni.wu.d" => "__builtin_lsx_vssrarni_wu_d",
+    "llvm.loongarch.lsx.vssrln.b.h" => "__builtin_lsx_vssrln_b_h",
+    "llvm.loongarch.lsx.vssrln.bu.h" => "__builtin_lsx_vssrln_bu_h",
+    "llvm.loongarch.lsx.vssrln.h.w" => "__builtin_lsx_vssrln_h_w",
+    "llvm.loongarch.lsx.vssrln.hu.w" => "__builtin_lsx_vssrln_hu_w",
+    "llvm.loongarch.lsx.vssrln.w.d" => "__builtin_lsx_vssrln_w_d",
+    "llvm.loongarch.lsx.vssrln.wu.d" => "__builtin_lsx_vssrln_wu_d",
+    "llvm.loongarch.lsx.vssrlni.b.h" => "__builtin_lsx_vssrlni_b_h",
+    "llvm.loongarch.lsx.vssrlni.bu.h" => "__builtin_lsx_vssrlni_bu_h",
+    "llvm.loongarch.lsx.vssrlni.d.q" => "__builtin_lsx_vssrlni_d_q",
+    "llvm.loongarch.lsx.vssrlni.du.q" => "__builtin_lsx_vssrlni_du_q",
+    "llvm.loongarch.lsx.vssrlni.h.w" => "__builtin_lsx_vssrlni_h_w",
+    "llvm.loongarch.lsx.vssrlni.hu.w" => "__builtin_lsx_vssrlni_hu_w",
+    "llvm.loongarch.lsx.vssrlni.w.d" => "__builtin_lsx_vssrlni_w_d",
+    "llvm.loongarch.lsx.vssrlni.wu.d" => "__builtin_lsx_vssrlni_wu_d",
+    "llvm.loongarch.lsx.vssrlrn.b.h" => "__builtin_lsx_vssrlrn_b_h",
+    "llvm.loongarch.lsx.vssrlrn.bu.h" => "__builtin_lsx_vssrlrn_bu_h",
+    "llvm.loongarch.lsx.vssrlrn.h.w" => "__builtin_lsx_vssrlrn_h_w",
+    "llvm.loongarch.lsx.vssrlrn.hu.w" => "__builtin_lsx_vssrlrn_hu_w",
+    "llvm.loongarch.lsx.vssrlrn.w.d" => "__builtin_lsx_vssrlrn_w_d",
+    "llvm.loongarch.lsx.vssrlrn.wu.d" => "__builtin_lsx_vssrlrn_wu_d",
+    "llvm.loongarch.lsx.vssrlrni.b.h" => "__builtin_lsx_vssrlrni_b_h",
+    "llvm.loongarch.lsx.vssrlrni.bu.h" => "__builtin_lsx_vssrlrni_bu_h",
+    "llvm.loongarch.lsx.vssrlrni.d.q" => "__builtin_lsx_vssrlrni_d_q",
+    "llvm.loongarch.lsx.vssrlrni.du.q" => "__builtin_lsx_vssrlrni_du_q",
+    "llvm.loongarch.lsx.vssrlrni.h.w" => "__builtin_lsx_vssrlrni_h_w",
+    "llvm.loongarch.lsx.vssrlrni.hu.w" => "__builtin_lsx_vssrlrni_hu_w",
+    "llvm.loongarch.lsx.vssrlrni.w.d" => "__builtin_lsx_vssrlrni_w_d",
+    "llvm.loongarch.lsx.vssrlrni.wu.d" => "__builtin_lsx_vssrlrni_wu_d",
+    "llvm.loongarch.lsx.vssub.b" => "__builtin_lsx_vssub_b",
+    "llvm.loongarch.lsx.vssub.bu" => "__builtin_lsx_vssub_bu",
+    "llvm.loongarch.lsx.vssub.d" => "__builtin_lsx_vssub_d",
+    "llvm.loongarch.lsx.vssub.du" => "__builtin_lsx_vssub_du",
+    "llvm.loongarch.lsx.vssub.h" => "__builtin_lsx_vssub_h",
+    "llvm.loongarch.lsx.vssub.hu" => "__builtin_lsx_vssub_hu",
+    "llvm.loongarch.lsx.vssub.w" => "__builtin_lsx_vssub_w",
+    "llvm.loongarch.lsx.vssub.wu" => "__builtin_lsx_vssub_wu",
+    "llvm.loongarch.lsx.vst" => "__builtin_lsx_vst",
+    "llvm.loongarch.lsx.vstelm.b" => "__builtin_lsx_vstelm_b",
+    "llvm.loongarch.lsx.vstelm.d" => "__builtin_lsx_vstelm_d",
+    "llvm.loongarch.lsx.vstelm.h" => "__builtin_lsx_vstelm_h",
+    "llvm.loongarch.lsx.vstelm.w" => "__builtin_lsx_vstelm_w",
+    "llvm.loongarch.lsx.vstx" => "__builtin_lsx_vstx",
+    "llvm.loongarch.lsx.vsub.b" => "__builtin_lsx_vsub_b",
+    "llvm.loongarch.lsx.vsub.d" => "__builtin_lsx_vsub_d",
+    "llvm.loongarch.lsx.vsub.h" => "__builtin_lsx_vsub_h",
+    "llvm.loongarch.lsx.vsub.q" => "__builtin_lsx_vsub_q",
+    "llvm.loongarch.lsx.vsub.w" => "__builtin_lsx_vsub_w",
+    "llvm.loongarch.lsx.vsubi.bu" => "__builtin_lsx_vsubi_bu",
+    "llvm.loongarch.lsx.vsubi.du" => "__builtin_lsx_vsubi_du",
+    "llvm.loongarch.lsx.vsubi.hu" => "__builtin_lsx_vsubi_hu",
+    "llvm.loongarch.lsx.vsubi.wu" => "__builtin_lsx_vsubi_wu",
+    "llvm.loongarch.lsx.vsubwev.d.w" => "__builtin_lsx_vsubwev_d_w",
+    "llvm.loongarch.lsx.vsubwev.d.wu" => "__builtin_lsx_vsubwev_d_wu",
+    "llvm.loongarch.lsx.vsubwev.h.b" => "__builtin_lsx_vsubwev_h_b",
+    "llvm.loongarch.lsx.vsubwev.h.bu" => "__builtin_lsx_vsubwev_h_bu",
+    "llvm.loongarch.lsx.vsubwev.q.d" => "__builtin_lsx_vsubwev_q_d",
+    "llvm.loongarch.lsx.vsubwev.q.du" => "__builtin_lsx_vsubwev_q_du",
+    "llvm.loongarch.lsx.vsubwev.w.h" => "__builtin_lsx_vsubwev_w_h",
+    "llvm.loongarch.lsx.vsubwev.w.hu" => "__builtin_lsx_vsubwev_w_hu",
+    "llvm.loongarch.lsx.vsubwod.d.w" => "__builtin_lsx_vsubwod_d_w",
+    "llvm.loongarch.lsx.vsubwod.d.wu" => "__builtin_lsx_vsubwod_d_wu",
+    "llvm.loongarch.lsx.vsubwod.h.b" => "__builtin_lsx_vsubwod_h_b",
+    "llvm.loongarch.lsx.vsubwod.h.bu" => "__builtin_lsx_vsubwod_h_bu",
+    "llvm.loongarch.lsx.vsubwod.q.d" => "__builtin_lsx_vsubwod_q_d",
+    "llvm.loongarch.lsx.vsubwod.q.du" => "__builtin_lsx_vsubwod_q_du",
+    "llvm.loongarch.lsx.vsubwod.w.h" => "__builtin_lsx_vsubwod_w_h",
+    "llvm.loongarch.lsx.vsubwod.w.hu" => "__builtin_lsx_vsubwod_w_hu",
+    "llvm.loongarch.lsx.vxor.v" => "__builtin_lsx_vxor_v",
+    "llvm.loongarch.lsx.vxori.b" => "__builtin_lsx_vxori_b",
+    "llvm.loongarch.movfcsr2gr" => "__builtin_loongarch_movfcsr2gr",
+    "llvm.loongarch.movgr2fcsr" => "__builtin_loongarch_movgr2fcsr",
+    "llvm.loongarch.syscall" => "__builtin_loongarch_syscall",
+    // mips
+    "llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph",
+    "llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb",
+    "llvm.mips.absq.s.w" => "__builtin_mips_absq_s_w",
+    "llvm.mips.add.a.b" => "__builtin_msa_add_a_b",
+    "llvm.mips.add.a.d" => "__builtin_msa_add_a_d",
+    "llvm.mips.add.a.h" => "__builtin_msa_add_a_h",
+    "llvm.mips.add.a.w" => "__builtin_msa_add_a_w",
+    "llvm.mips.addq.ph" => "__builtin_mips_addq_ph",
+    "llvm.mips.addq.s.ph" => "__builtin_mips_addq_s_ph",
+    "llvm.mips.addq.s.w" => "__builtin_mips_addq_s_w",
+    "llvm.mips.addqh.ph" => "__builtin_mips_addqh_ph",
+    "llvm.mips.addqh.r.ph" => "__builtin_mips_addqh_r_ph",
+    "llvm.mips.addqh.r.w" => "__builtin_mips_addqh_r_w",
+    "llvm.mips.addqh.w" => "__builtin_mips_addqh_w",
+    "llvm.mips.adds.a.b" => "__builtin_msa_adds_a_b",
+    "llvm.mips.adds.a.d" => "__builtin_msa_adds_a_d",
+    "llvm.mips.adds.a.h" => "__builtin_msa_adds_a_h",
+    "llvm.mips.adds.a.w" => "__builtin_msa_adds_a_w",
+    "llvm.mips.adds.s.b" => "__builtin_msa_adds_s_b",
+    "llvm.mips.adds.s.d" => "__builtin_msa_adds_s_d",
+    "llvm.mips.adds.s.h" => "__builtin_msa_adds_s_h",
+    "llvm.mips.adds.s.w" => "__builtin_msa_adds_s_w",
+    "llvm.mips.adds.u.b" => "__builtin_msa_adds_u_b",
+    "llvm.mips.adds.u.d" => "__builtin_msa_adds_u_d",
+    "llvm.mips.adds.u.h" => "__builtin_msa_adds_u_h",
+    "llvm.mips.adds.u.w" => "__builtin_msa_adds_u_w",
+    "llvm.mips.addsc" => "__builtin_mips_addsc",
+    "llvm.mips.addu.ph" => "__builtin_mips_addu_ph",
+    "llvm.mips.addu.qb" => "__builtin_mips_addu_qb",
+    "llvm.mips.addu.s.ph" => "__builtin_mips_addu_s_ph",
+    "llvm.mips.addu.s.qb" => "__builtin_mips_addu_s_qb",
+    "llvm.mips.adduh.qb" => "__builtin_mips_adduh_qb",
+    "llvm.mips.adduh.r.qb" => "__builtin_mips_adduh_r_qb",
+    "llvm.mips.addv.b" => "__builtin_msa_addv_b",
+    "llvm.mips.addv.d" => "__builtin_msa_addv_d",
+    "llvm.mips.addv.h" => "__builtin_msa_addv_h",
+    "llvm.mips.addv.w" => "__builtin_msa_addv_w",
+    "llvm.mips.addvi.b" => "__builtin_msa_addvi_b",
+    "llvm.mips.addvi.d" => "__builtin_msa_addvi_d",
+    "llvm.mips.addvi.h" => "__builtin_msa_addvi_h",
+    "llvm.mips.addvi.w" => "__builtin_msa_addvi_w",
+    "llvm.mips.addwc" => "__builtin_mips_addwc",
+    "llvm.mips.and.v" => "__builtin_msa_and_v",
+    "llvm.mips.andi.b" => "__builtin_msa_andi_b",
+    "llvm.mips.append" => "__builtin_mips_append",
+    "llvm.mips.asub.s.b" => "__builtin_msa_asub_s_b",
+    "llvm.mips.asub.s.d" => "__builtin_msa_asub_s_d",
+    "llvm.mips.asub.s.h" => "__builtin_msa_asub_s_h",
+    "llvm.mips.asub.s.w" => "__builtin_msa_asub_s_w",
+    "llvm.mips.asub.u.b" => "__builtin_msa_asub_u_b",
+    "llvm.mips.asub.u.d" => "__builtin_msa_asub_u_d",
+    "llvm.mips.asub.u.h" => "__builtin_msa_asub_u_h",
+    "llvm.mips.asub.u.w" => "__builtin_msa_asub_u_w",
+    "llvm.mips.ave.s.b" => "__builtin_msa_ave_s_b",
+    "llvm.mips.ave.s.d" => "__builtin_msa_ave_s_d",
+    "llvm.mips.ave.s.h" => "__builtin_msa_ave_s_h",
+    "llvm.mips.ave.s.w" => "__builtin_msa_ave_s_w",
+    "llvm.mips.ave.u.b" => "__builtin_msa_ave_u_b",
+    "llvm.mips.ave.u.d" => "__builtin_msa_ave_u_d",
+    "llvm.mips.ave.u.h" => "__builtin_msa_ave_u_h",
+    "llvm.mips.ave.u.w" => "__builtin_msa_ave_u_w",
+    "llvm.mips.aver.s.b" => "__builtin_msa_aver_s_b",
+    "llvm.mips.aver.s.d" => "__builtin_msa_aver_s_d",
+    "llvm.mips.aver.s.h" => "__builtin_msa_aver_s_h",
+    "llvm.mips.aver.s.w" => "__builtin_msa_aver_s_w",
+    "llvm.mips.aver.u.b" => "__builtin_msa_aver_u_b",
+    "llvm.mips.aver.u.d" => "__builtin_msa_aver_u_d",
+    "llvm.mips.aver.u.h" => "__builtin_msa_aver_u_h",
+    "llvm.mips.aver.u.w" => "__builtin_msa_aver_u_w",
+    "llvm.mips.balign" => "__builtin_mips_balign",
+    "llvm.mips.bclr.b" => "__builtin_msa_bclr_b",
+    "llvm.mips.bclr.d" => "__builtin_msa_bclr_d",
+    "llvm.mips.bclr.h" => "__builtin_msa_bclr_h",
+    "llvm.mips.bclr.w" => "__builtin_msa_bclr_w",
+    "llvm.mips.bclri.b" => "__builtin_msa_bclri_b",
+    "llvm.mips.bclri.d" => "__builtin_msa_bclri_d",
+    "llvm.mips.bclri.h" => "__builtin_msa_bclri_h",
+    "llvm.mips.bclri.w" => "__builtin_msa_bclri_w",
+    "llvm.mips.binsl.b" => "__builtin_msa_binsl_b",
+    "llvm.mips.binsl.d" => "__builtin_msa_binsl_d",
+    "llvm.mips.binsl.h" => "__builtin_msa_binsl_h",
+    "llvm.mips.binsl.w" => "__builtin_msa_binsl_w",
+    "llvm.mips.binsli.b" => "__builtin_msa_binsli_b",
+    "llvm.mips.binsli.d" => "__builtin_msa_binsli_d",
+    "llvm.mips.binsli.h" => "__builtin_msa_binsli_h",
+    "llvm.mips.binsli.w" => "__builtin_msa_binsli_w",
+    "llvm.mips.binsr.b" => "__builtin_msa_binsr_b",
+    "llvm.mips.binsr.d" => "__builtin_msa_binsr_d",
+    "llvm.mips.binsr.h" => "__builtin_msa_binsr_h",
+    "llvm.mips.binsr.w" => "__builtin_msa_binsr_w",
+    "llvm.mips.binsri.b" => "__builtin_msa_binsri_b",
+    "llvm.mips.binsri.d" => "__builtin_msa_binsri_d",
+    "llvm.mips.binsri.h" => "__builtin_msa_binsri_h",
+    "llvm.mips.binsri.w" => "__builtin_msa_binsri_w",
+    "llvm.mips.bitrev" => "__builtin_mips_bitrev",
+    "llvm.mips.bmnz.v" => "__builtin_msa_bmnz_v",
+    "llvm.mips.bmnzi.b" => "__builtin_msa_bmnzi_b",
+    "llvm.mips.bmz.v" => "__builtin_msa_bmz_v",
+    "llvm.mips.bmzi.b" => "__builtin_msa_bmzi_b",
+    "llvm.mips.bneg.b" => "__builtin_msa_bneg_b",
+    "llvm.mips.bneg.d" => "__builtin_msa_bneg_d",
+    "llvm.mips.bneg.h" => "__builtin_msa_bneg_h",
+    "llvm.mips.bneg.w" => "__builtin_msa_bneg_w",
+    "llvm.mips.bnegi.b" => "__builtin_msa_bnegi_b",
+    "llvm.mips.bnegi.d" => "__builtin_msa_bnegi_d",
+    "llvm.mips.bnegi.h" => "__builtin_msa_bnegi_h",
+    "llvm.mips.bnegi.w" => "__builtin_msa_bnegi_w",
+    "llvm.mips.bnz.b" => "__builtin_msa_bnz_b",
+    "llvm.mips.bnz.d" => "__builtin_msa_bnz_d",
+    "llvm.mips.bnz.h" => "__builtin_msa_bnz_h",
+    "llvm.mips.bnz.v" => "__builtin_msa_bnz_v",
+    "llvm.mips.bnz.w" => "__builtin_msa_bnz_w",
+    "llvm.mips.bposge32" => "__builtin_mips_bposge32",
+    "llvm.mips.bsel.v" => "__builtin_msa_bsel_v",
+    "llvm.mips.bseli.b" => "__builtin_msa_bseli_b",
+    "llvm.mips.bset.b" => "__builtin_msa_bset_b",
+    "llvm.mips.bset.d" => "__builtin_msa_bset_d",
+    "llvm.mips.bset.h" => "__builtin_msa_bset_h",
+    "llvm.mips.bset.w" => "__builtin_msa_bset_w",
+    "llvm.mips.bseti.b" => "__builtin_msa_bseti_b",
+    "llvm.mips.bseti.d" => "__builtin_msa_bseti_d",
+    "llvm.mips.bseti.h" => "__builtin_msa_bseti_h",
+    "llvm.mips.bseti.w" => "__builtin_msa_bseti_w",
+    "llvm.mips.bz.b" => "__builtin_msa_bz_b",
+    "llvm.mips.bz.d" => "__builtin_msa_bz_d",
+    "llvm.mips.bz.h" => "__builtin_msa_bz_h",
+    "llvm.mips.bz.v" => "__builtin_msa_bz_v",
+    "llvm.mips.bz.w" => "__builtin_msa_bz_w",
+    "llvm.mips.ceq.b" => "__builtin_msa_ceq_b",
+    "llvm.mips.ceq.d" => "__builtin_msa_ceq_d",
+    "llvm.mips.ceq.h" => "__builtin_msa_ceq_h",
+    "llvm.mips.ceq.w" => "__builtin_msa_ceq_w",
+    "llvm.mips.ceqi.b" => "__builtin_msa_ceqi_b",
+    "llvm.mips.ceqi.d" => "__builtin_msa_ceqi_d",
+    "llvm.mips.ceqi.h" => "__builtin_msa_ceqi_h",
+    "llvm.mips.ceqi.w" => "__builtin_msa_ceqi_w",
+    "llvm.mips.cfcmsa" => "__builtin_msa_cfcmsa",
+    "llvm.mips.cle.s.b" => "__builtin_msa_cle_s_b",
+    "llvm.mips.cle.s.d" => "__builtin_msa_cle_s_d",
+    "llvm.mips.cle.s.h" => "__builtin_msa_cle_s_h",
+    "llvm.mips.cle.s.w" => "__builtin_msa_cle_s_w",
+    "llvm.mips.cle.u.b" => "__builtin_msa_cle_u_b",
+    "llvm.mips.cle.u.d" => "__builtin_msa_cle_u_d",
+    "llvm.mips.cle.u.h" => "__builtin_msa_cle_u_h",
+    "llvm.mips.cle.u.w" => "__builtin_msa_cle_u_w",
+    "llvm.mips.clei.s.b" => "__builtin_msa_clei_s_b",
+    "llvm.mips.clei.s.d" => "__builtin_msa_clei_s_d",
+    "llvm.mips.clei.s.h" => "__builtin_msa_clei_s_h",
+    "llvm.mips.clei.s.w" => "__builtin_msa_clei_s_w",
+    "llvm.mips.clei.u.b" => "__builtin_msa_clei_u_b",
+    "llvm.mips.clei.u.d" => "__builtin_msa_clei_u_d",
+    "llvm.mips.clei.u.h" => "__builtin_msa_clei_u_h",
+    "llvm.mips.clei.u.w" => "__builtin_msa_clei_u_w",
+    "llvm.mips.clt.s.b" => "__builtin_msa_clt_s_b",
+    "llvm.mips.clt.s.d" => "__builtin_msa_clt_s_d",
+    "llvm.mips.clt.s.h" => "__builtin_msa_clt_s_h",
+    "llvm.mips.clt.s.w" => "__builtin_msa_clt_s_w",
+    "llvm.mips.clt.u.b" => "__builtin_msa_clt_u_b",
+    "llvm.mips.clt.u.d" => "__builtin_msa_clt_u_d",
+    "llvm.mips.clt.u.h" => "__builtin_msa_clt_u_h",
+    "llvm.mips.clt.u.w" => "__builtin_msa_clt_u_w",
+    "llvm.mips.clti.s.b" => "__builtin_msa_clti_s_b",
+    "llvm.mips.clti.s.d" => "__builtin_msa_clti_s_d",
+    "llvm.mips.clti.s.h" => "__builtin_msa_clti_s_h",
+    "llvm.mips.clti.s.w" => "__builtin_msa_clti_s_w",
+    "llvm.mips.clti.u.b" => "__builtin_msa_clti_u_b",
+    "llvm.mips.clti.u.d" => "__builtin_msa_clti_u_d",
+    "llvm.mips.clti.u.h" => "__builtin_msa_clti_u_h",
+    "llvm.mips.clti.u.w" => "__builtin_msa_clti_u_w",
+    "llvm.mips.cmp.eq.ph" => "__builtin_mips_cmp_eq_ph",
+    "llvm.mips.cmp.le.ph" => "__builtin_mips_cmp_le_ph",
+    "llvm.mips.cmp.lt.ph" => "__builtin_mips_cmp_lt_ph",
+    "llvm.mips.cmpgdu.eq.qb" => "__builtin_mips_cmpgdu_eq_qb",
+    "llvm.mips.cmpgdu.le.qb" => "__builtin_mips_cmpgdu_le_qb",
+    "llvm.mips.cmpgdu.lt.qb" => "__builtin_mips_cmpgdu_lt_qb",
+    "llvm.mips.cmpgu.eq.qb" => "__builtin_mips_cmpgu_eq_qb",
+    "llvm.mips.cmpgu.le.qb" => "__builtin_mips_cmpgu_le_qb",
+    "llvm.mips.cmpgu.lt.qb" => "__builtin_mips_cmpgu_lt_qb",
+    "llvm.mips.cmpu.eq.qb" => "__builtin_mips_cmpu_eq_qb",
+    "llvm.mips.cmpu.le.qb" => "__builtin_mips_cmpu_le_qb",
+    "llvm.mips.cmpu.lt.qb" => "__builtin_mips_cmpu_lt_qb",
+    "llvm.mips.copy.s.b" => "__builtin_msa_copy_s_b",
+    "llvm.mips.copy.s.d" => "__builtin_msa_copy_s_d",
+    "llvm.mips.copy.s.h" => "__builtin_msa_copy_s_h",
+    "llvm.mips.copy.s.w" => "__builtin_msa_copy_s_w",
+    "llvm.mips.copy.u.b" => "__builtin_msa_copy_u_b",
+    "llvm.mips.copy.u.d" => "__builtin_msa_copy_u_d",
+    "llvm.mips.copy.u.h" => "__builtin_msa_copy_u_h",
+    "llvm.mips.copy.u.w" => "__builtin_msa_copy_u_w",
+    "llvm.mips.ctcmsa" => "__builtin_msa_ctcmsa",
+    "llvm.mips.div.s.b" => "__builtin_msa_div_s_b",
+    "llvm.mips.div.s.d" => "__builtin_msa_div_s_d",
+    "llvm.mips.div.s.h" => "__builtin_msa_div_s_h",
+    "llvm.mips.div.s.w" => "__builtin_msa_div_s_w",
+    "llvm.mips.div.u.b" => "__builtin_msa_div_u_b",
+    "llvm.mips.div.u.d" => "__builtin_msa_div_u_d",
+    "llvm.mips.div.u.h" => "__builtin_msa_div_u_h",
+    "llvm.mips.div.u.w" => "__builtin_msa_div_u_w",
+    "llvm.mips.dlsa" => "__builtin_mips_dlsa",
+    "llvm.mips.dotp.s.d" => "__builtin_msa_dotp_s_d",
+    "llvm.mips.dotp.s.h" => "__builtin_msa_dotp_s_h",
+    "llvm.mips.dotp.s.w" => "__builtin_msa_dotp_s_w",
+    "llvm.mips.dotp.u.d" => "__builtin_msa_dotp_u_d",
+    "llvm.mips.dotp.u.h" => "__builtin_msa_dotp_u_h",
+    "llvm.mips.dotp.u.w" => "__builtin_msa_dotp_u_w",
+    "llvm.mips.dpa.w.ph" => "__builtin_mips_dpa_w_ph",
+    "llvm.mips.dpadd.s.d" => "__builtin_msa_dpadd_s_d",
+    "llvm.mips.dpadd.s.h" => "__builtin_msa_dpadd_s_h",
+    "llvm.mips.dpadd.s.w" => "__builtin_msa_dpadd_s_w",
+    "llvm.mips.dpadd.u.d" => "__builtin_msa_dpadd_u_d",
+    "llvm.mips.dpadd.u.h" => "__builtin_msa_dpadd_u_h",
+    "llvm.mips.dpadd.u.w" => "__builtin_msa_dpadd_u_w",
+    "llvm.mips.dpaq.s.w.ph" => "__builtin_mips_dpaq_s_w_ph",
+    "llvm.mips.dpaq.sa.l.w" => "__builtin_mips_dpaq_sa_l_w",
+    "llvm.mips.dpaqx.s.w.ph" => "__builtin_mips_dpaqx_s_w_ph",
+    "llvm.mips.dpaqx.sa.w.ph" => "__builtin_mips_dpaqx_sa_w_ph",
+    "llvm.mips.dpau.h.qbl" => "__builtin_mips_dpau_h_qbl",
+    "llvm.mips.dpau.h.qbr" => "__builtin_mips_dpau_h_qbr",
+    "llvm.mips.dpax.w.ph" => "__builtin_mips_dpax_w_ph",
+    "llvm.mips.dps.w.ph" => "__builtin_mips_dps_w_ph",
+    "llvm.mips.dpsq.s.w.ph" => "__builtin_mips_dpsq_s_w_ph",
+    "llvm.mips.dpsq.sa.l.w" => "__builtin_mips_dpsq_sa_l_w",
+    "llvm.mips.dpsqx.s.w.ph" => "__builtin_mips_dpsqx_s_w_ph",
+    "llvm.mips.dpsqx.sa.w.ph" => "__builtin_mips_dpsqx_sa_w_ph",
+    "llvm.mips.dpsu.h.qbl" => "__builtin_mips_dpsu_h_qbl",
+    "llvm.mips.dpsu.h.qbr" => "__builtin_mips_dpsu_h_qbr",
+    "llvm.mips.dpsub.s.d" => "__builtin_msa_dpsub_s_d",
+    "llvm.mips.dpsub.s.h" => "__builtin_msa_dpsub_s_h",
+    "llvm.mips.dpsub.s.w" => "__builtin_msa_dpsub_s_w",
+    "llvm.mips.dpsub.u.d" => "__builtin_msa_dpsub_u_d",
+    "llvm.mips.dpsub.u.h" => "__builtin_msa_dpsub_u_h",
+    "llvm.mips.dpsub.u.w" => "__builtin_msa_dpsub_u_w",
+    "llvm.mips.dpsx.w.ph" => "__builtin_mips_dpsx_w_ph",
+    "llvm.mips.extp" => "__builtin_mips_extp",
+    "llvm.mips.extpdp" => "__builtin_mips_extpdp",
+    "llvm.mips.extr.r.w" => "__builtin_mips_extr_r_w",
+    "llvm.mips.extr.rs.w" => "__builtin_mips_extr_rs_w",
+    "llvm.mips.extr.s.h" => "__builtin_mips_extr_s_h",
+    "llvm.mips.extr.w" => "__builtin_mips_extr_w",
+    "llvm.mips.fadd.d" => "__builtin_msa_fadd_d",
+    "llvm.mips.fadd.w" => "__builtin_msa_fadd_w",
+    "llvm.mips.fcaf.d" => "__builtin_msa_fcaf_d",
+    "llvm.mips.fcaf.w" => "__builtin_msa_fcaf_w",
+    "llvm.mips.fceq.d" => "__builtin_msa_fceq_d",
+    "llvm.mips.fceq.w" => "__builtin_msa_fceq_w",
+    "llvm.mips.fclass.d" => "__builtin_msa_fclass_d",
+    "llvm.mips.fclass.w" => "__builtin_msa_fclass_w",
+    "llvm.mips.fcle.d" => "__builtin_msa_fcle_d",
+    "llvm.mips.fcle.w" => "__builtin_msa_fcle_w",
+    "llvm.mips.fclt.d" => "__builtin_msa_fclt_d",
+    "llvm.mips.fclt.w" => "__builtin_msa_fclt_w",
+    "llvm.mips.fcne.d" => "__builtin_msa_fcne_d",
+    "llvm.mips.fcne.w" => "__builtin_msa_fcne_w",
+    "llvm.mips.fcor.d" => "__builtin_msa_fcor_d",
+    "llvm.mips.fcor.w" => "__builtin_msa_fcor_w",
+    "llvm.mips.fcueq.d" => "__builtin_msa_fcueq_d",
+    "llvm.mips.fcueq.w" => "__builtin_msa_fcueq_w",
+    "llvm.mips.fcule.d" => "__builtin_msa_fcule_d",
+    "llvm.mips.fcule.w" => "__builtin_msa_fcule_w",
+    "llvm.mips.fcult.d" => "__builtin_msa_fcult_d",
+    "llvm.mips.fcult.w" => "__builtin_msa_fcult_w",
+    "llvm.mips.fcun.d" => "__builtin_msa_fcun_d",
+    "llvm.mips.fcun.w" => "__builtin_msa_fcun_w",
+    "llvm.mips.fcune.d" => "__builtin_msa_fcune_d",
+    "llvm.mips.fcune.w" => "__builtin_msa_fcune_w",
+    "llvm.mips.fdiv.d" => "__builtin_msa_fdiv_d",
+    "llvm.mips.fdiv.w" => "__builtin_msa_fdiv_w",
+    "llvm.mips.fexdo.h" => "__builtin_msa_fexdo_h",
+    "llvm.mips.fexdo.w" => "__builtin_msa_fexdo_w",
+    "llvm.mips.fexp2.d" => "__builtin_msa_fexp2_d",
+    "llvm.mips.fexp2.w" => "__builtin_msa_fexp2_w",
+    "llvm.mips.fexupl.d" => "__builtin_msa_fexupl_d",
+    "llvm.mips.fexupl.w" => "__builtin_msa_fexupl_w",
+    "llvm.mips.fexupr.d" => "__builtin_msa_fexupr_d",
+    "llvm.mips.fexupr.w" => "__builtin_msa_fexupr_w",
+    "llvm.mips.ffint.s.d" => "__builtin_msa_ffint_s_d",
+    "llvm.mips.ffint.s.w" => "__builtin_msa_ffint_s_w",
+    "llvm.mips.ffint.u.d" => "__builtin_msa_ffint_u_d",
+    "llvm.mips.ffint.u.w" => "__builtin_msa_ffint_u_w",
+    "llvm.mips.ffql.d" => "__builtin_msa_ffql_d",
+    "llvm.mips.ffql.w" => "__builtin_msa_ffql_w",
+    "llvm.mips.ffqr.d" => "__builtin_msa_ffqr_d",
+    "llvm.mips.ffqr.w" => "__builtin_msa_ffqr_w",
+    "llvm.mips.fill.b" => "__builtin_msa_fill_b",
+    "llvm.mips.fill.d" => "__builtin_msa_fill_d",
+    "llvm.mips.fill.h" => "__builtin_msa_fill_h",
+    "llvm.mips.fill.w" => "__builtin_msa_fill_w",
+    "llvm.mips.flog2.d" => "__builtin_msa_flog2_d",
+    "llvm.mips.flog2.w" => "__builtin_msa_flog2_w",
+    "llvm.mips.fmadd.d" => "__builtin_msa_fmadd_d",
+    "llvm.mips.fmadd.w" => "__builtin_msa_fmadd_w",
+    "llvm.mips.fmax.a.d" => "__builtin_msa_fmax_a_d",
+    "llvm.mips.fmax.a.w" => "__builtin_msa_fmax_a_w",
+    "llvm.mips.fmax.d" => "__builtin_msa_fmax_d",
+    "llvm.mips.fmax.w" => "__builtin_msa_fmax_w",
+    "llvm.mips.fmin.a.d" => "__builtin_msa_fmin_a_d",
+    "llvm.mips.fmin.a.w" => "__builtin_msa_fmin_a_w",
+    "llvm.mips.fmin.d" => "__builtin_msa_fmin_d",
+    "llvm.mips.fmin.w" => "__builtin_msa_fmin_w",
+    "llvm.mips.fmsub.d" => "__builtin_msa_fmsub_d",
+    "llvm.mips.fmsub.w" => "__builtin_msa_fmsub_w",
+    "llvm.mips.fmul.d" => "__builtin_msa_fmul_d",
+    "llvm.mips.fmul.w" => "__builtin_msa_fmul_w",
+    "llvm.mips.frcp.d" => "__builtin_msa_frcp_d",
+    "llvm.mips.frcp.w" => "__builtin_msa_frcp_w",
+    "llvm.mips.frint.d" => "__builtin_msa_frint_d",
+    "llvm.mips.frint.w" => "__builtin_msa_frint_w",
+    "llvm.mips.frsqrt.d" => "__builtin_msa_frsqrt_d",
+    "llvm.mips.frsqrt.w" => "__builtin_msa_frsqrt_w",
+    "llvm.mips.fsaf.d" => "__builtin_msa_fsaf_d",
+    "llvm.mips.fsaf.w" => "__builtin_msa_fsaf_w",
+    "llvm.mips.fseq.d" => "__builtin_msa_fseq_d",
+    "llvm.mips.fseq.w" => "__builtin_msa_fseq_w",
+    "llvm.mips.fsle.d" => "__builtin_msa_fsle_d",
+    "llvm.mips.fsle.w" => "__builtin_msa_fsle_w",
+    "llvm.mips.fslt.d" => "__builtin_msa_fslt_d",
+    "llvm.mips.fslt.w" => "__builtin_msa_fslt_w",
+    "llvm.mips.fsne.d" => "__builtin_msa_fsne_d",
+    "llvm.mips.fsne.w" => "__builtin_msa_fsne_w",
+    "llvm.mips.fsor.d" => "__builtin_msa_fsor_d",
+    "llvm.mips.fsor.w" => "__builtin_msa_fsor_w",
+    "llvm.mips.fsqrt.d" => "__builtin_msa_fsqrt_d",
+    "llvm.mips.fsqrt.w" => "__builtin_msa_fsqrt_w",
+    "llvm.mips.fsub.d" => "__builtin_msa_fsub_d",
+    "llvm.mips.fsub.w" => "__builtin_msa_fsub_w",
+    "llvm.mips.fsueq.d" => "__builtin_msa_fsueq_d",
+    "llvm.mips.fsueq.w" => "__builtin_msa_fsueq_w",
+    "llvm.mips.fsule.d" => "__builtin_msa_fsule_d",
+    "llvm.mips.fsule.w" => "__builtin_msa_fsule_w",
+    "llvm.mips.fsult.d" => "__builtin_msa_fsult_d",
+    "llvm.mips.fsult.w" => "__builtin_msa_fsult_w",
+    "llvm.mips.fsun.d" => "__builtin_msa_fsun_d",
+    "llvm.mips.fsun.w" => "__builtin_msa_fsun_w",
+    "llvm.mips.fsune.d" => "__builtin_msa_fsune_d",
+    "llvm.mips.fsune.w" => "__builtin_msa_fsune_w",
+    "llvm.mips.ftint.s.d" => "__builtin_msa_ftint_s_d",
+    "llvm.mips.ftint.s.w" => "__builtin_msa_ftint_s_w",
+    "llvm.mips.ftint.u.d" => "__builtin_msa_ftint_u_d",
+    "llvm.mips.ftint.u.w" => "__builtin_msa_ftint_u_w",
+    "llvm.mips.ftq.h" => "__builtin_msa_ftq_h",
+    "llvm.mips.ftq.w" => "__builtin_msa_ftq_w",
+    "llvm.mips.ftrunc.s.d" => "__builtin_msa_ftrunc_s_d",
+    "llvm.mips.ftrunc.s.w" => "__builtin_msa_ftrunc_s_w",
+    "llvm.mips.ftrunc.u.d" => "__builtin_msa_ftrunc_u_d",
+    "llvm.mips.ftrunc.u.w" => "__builtin_msa_ftrunc_u_w",
+    "llvm.mips.hadd.s.d" => "__builtin_msa_hadd_s_d",
+    "llvm.mips.hadd.s.h" => "__builtin_msa_hadd_s_h",
+    "llvm.mips.hadd.s.w" => "__builtin_msa_hadd_s_w",
+    "llvm.mips.hadd.u.d" => "__builtin_msa_hadd_u_d",
+    "llvm.mips.hadd.u.h" => "__builtin_msa_hadd_u_h",
+    "llvm.mips.hadd.u.w" => "__builtin_msa_hadd_u_w",
+    "llvm.mips.hsub.s.d" => "__builtin_msa_hsub_s_d",
+    "llvm.mips.hsub.s.h" => "__builtin_msa_hsub_s_h",
+    "llvm.mips.hsub.s.w" => "__builtin_msa_hsub_s_w",
+    "llvm.mips.hsub.u.d" => "__builtin_msa_hsub_u_d",
+    "llvm.mips.hsub.u.h" => "__builtin_msa_hsub_u_h",
+    "llvm.mips.hsub.u.w" => "__builtin_msa_hsub_u_w",
+    "llvm.mips.ilvev.b" => "__builtin_msa_ilvev_b",
+    "llvm.mips.ilvev.d" => "__builtin_msa_ilvev_d",
+    "llvm.mips.ilvev.h" => "__builtin_msa_ilvev_h",
+    "llvm.mips.ilvev.w" => "__builtin_msa_ilvev_w",
+    "llvm.mips.ilvl.b" => "__builtin_msa_ilvl_b",
+    "llvm.mips.ilvl.d" => "__builtin_msa_ilvl_d",
+    "llvm.mips.ilvl.h" => "__builtin_msa_ilvl_h",
+    "llvm.mips.ilvl.w" => "__builtin_msa_ilvl_w",
+    "llvm.mips.ilvod.b" => "__builtin_msa_ilvod_b",
+    "llvm.mips.ilvod.d" => "__builtin_msa_ilvod_d",
+    "llvm.mips.ilvod.h" => "__builtin_msa_ilvod_h",
+    "llvm.mips.ilvod.w" => "__builtin_msa_ilvod_w",
+    "llvm.mips.ilvr.b" => "__builtin_msa_ilvr_b",
+    "llvm.mips.ilvr.d" => "__builtin_msa_ilvr_d",
+    "llvm.mips.ilvr.h" => "__builtin_msa_ilvr_h",
+    "llvm.mips.ilvr.w" => "__builtin_msa_ilvr_w",
+    "llvm.mips.insert.b" => "__builtin_msa_insert_b",
+    "llvm.mips.insert.d" => "__builtin_msa_insert_d",
+    "llvm.mips.insert.h" => "__builtin_msa_insert_h",
+    "llvm.mips.insert.w" => "__builtin_msa_insert_w",
+    "llvm.mips.insv" => "__builtin_mips_insv",
+    "llvm.mips.insve.b" => "__builtin_msa_insve_b",
+    "llvm.mips.insve.d" => "__builtin_msa_insve_d",
+    "llvm.mips.insve.h" => "__builtin_msa_insve_h",
+    "llvm.mips.insve.w" => "__builtin_msa_insve_w",
+    "llvm.mips.lbux" => "__builtin_mips_lbux",
+    "llvm.mips.ld.b" => "__builtin_msa_ld_b",
+    "llvm.mips.ld.d" => "__builtin_msa_ld_d",
+    "llvm.mips.ld.h" => "__builtin_msa_ld_h",
+    "llvm.mips.ld.w" => "__builtin_msa_ld_w",
+    "llvm.mips.ldi.b" => "__builtin_msa_ldi_b",
+    "llvm.mips.ldi.d" => "__builtin_msa_ldi_d",
+    "llvm.mips.ldi.h" => "__builtin_msa_ldi_h",
+    "llvm.mips.ldi.w" => "__builtin_msa_ldi_w",
+    "llvm.mips.ldr.d" => "__builtin_msa_ldr_d",
+    "llvm.mips.ldr.w" => "__builtin_msa_ldr_w",
+    "llvm.mips.lhx" => "__builtin_mips_lhx",
+    "llvm.mips.lsa" => "__builtin_mips_lsa",
+    "llvm.mips.lwx" => "__builtin_mips_lwx",
+    "llvm.mips.madd" => "__builtin_mips_madd",
+    "llvm.mips.madd.q.h" => "__builtin_msa_madd_q_h",
+    "llvm.mips.madd.q.w" => "__builtin_msa_madd_q_w",
+    "llvm.mips.maddr.q.h" => "__builtin_msa_maddr_q_h",
+    "llvm.mips.maddr.q.w" => "__builtin_msa_maddr_q_w",
+    "llvm.mips.maddu" => "__builtin_mips_maddu",
+    "llvm.mips.maddv.b" => "__builtin_msa_maddv_b",
+    "llvm.mips.maddv.d" => "__builtin_msa_maddv_d",
+    "llvm.mips.maddv.h" => "__builtin_msa_maddv_h",
+    "llvm.mips.maddv.w" => "__builtin_msa_maddv_w",
+    "llvm.mips.maq.s.w.phl" => "__builtin_mips_maq_s_w_phl",
+    "llvm.mips.maq.s.w.phr" => "__builtin_mips_maq_s_w_phr",
+    "llvm.mips.maq.sa.w.phl" => "__builtin_mips_maq_sa_w_phl",
+    "llvm.mips.maq.sa.w.phr" => "__builtin_mips_maq_sa_w_phr",
+    "llvm.mips.max.a.b" => "__builtin_msa_max_a_b",
+    "llvm.mips.max.a.d" => "__builtin_msa_max_a_d",
+    "llvm.mips.max.a.h" => "__builtin_msa_max_a_h",
+    "llvm.mips.max.a.w" => "__builtin_msa_max_a_w",
+    "llvm.mips.max.s.b" => "__builtin_msa_max_s_b",
+    "llvm.mips.max.s.d" => "__builtin_msa_max_s_d",
+    "llvm.mips.max.s.h" => "__builtin_msa_max_s_h",
+    "llvm.mips.max.s.w" => "__builtin_msa_max_s_w",
+    "llvm.mips.max.u.b" => "__builtin_msa_max_u_b",
+    "llvm.mips.max.u.d" => "__builtin_msa_max_u_d",
+    "llvm.mips.max.u.h" => "__builtin_msa_max_u_h",
+    "llvm.mips.max.u.w" => "__builtin_msa_max_u_w",
+    "llvm.mips.maxi.s.b" => "__builtin_msa_maxi_s_b",
+    "llvm.mips.maxi.s.d" => "__builtin_msa_maxi_s_d",
+    "llvm.mips.maxi.s.h" => "__builtin_msa_maxi_s_h",
+    "llvm.mips.maxi.s.w" => "__builtin_msa_maxi_s_w",
+    "llvm.mips.maxi.u.b" => "__builtin_msa_maxi_u_b",
+    "llvm.mips.maxi.u.d" => "__builtin_msa_maxi_u_d",
+    "llvm.mips.maxi.u.h" => "__builtin_msa_maxi_u_h",
+    "llvm.mips.maxi.u.w" => "__builtin_msa_maxi_u_w",
+    "llvm.mips.min.a.b" => "__builtin_msa_min_a_b",
+    "llvm.mips.min.a.d" => "__builtin_msa_min_a_d",
+    "llvm.mips.min.a.h" => "__builtin_msa_min_a_h",
+    "llvm.mips.min.a.w" => "__builtin_msa_min_a_w",
+    "llvm.mips.min.s.b" => "__builtin_msa_min_s_b",
+    "llvm.mips.min.s.d" => "__builtin_msa_min_s_d",
+    "llvm.mips.min.s.h" => "__builtin_msa_min_s_h",
+    "llvm.mips.min.s.w" => "__builtin_msa_min_s_w",
+    "llvm.mips.min.u.b" => "__builtin_msa_min_u_b",
+    "llvm.mips.min.u.d" => "__builtin_msa_min_u_d",
+    "llvm.mips.min.u.h" => "__builtin_msa_min_u_h",
+    "llvm.mips.min.u.w" => "__builtin_msa_min_u_w",
+    "llvm.mips.mini.s.b" => "__builtin_msa_mini_s_b",
+    "llvm.mips.mini.s.d" => "__builtin_msa_mini_s_d",
+    "llvm.mips.mini.s.h" => "__builtin_msa_mini_s_h",
+    "llvm.mips.mini.s.w" => "__builtin_msa_mini_s_w",
+    "llvm.mips.mini.u.b" => "__builtin_msa_mini_u_b",
+    "llvm.mips.mini.u.d" => "__builtin_msa_mini_u_d",
+    "llvm.mips.mini.u.h" => "__builtin_msa_mini_u_h",
+    "llvm.mips.mini.u.w" => "__builtin_msa_mini_u_w",
+    "llvm.mips.mod.s.b" => "__builtin_msa_mod_s_b",
+    "llvm.mips.mod.s.d" => "__builtin_msa_mod_s_d",
+    "llvm.mips.mod.s.h" => "__builtin_msa_mod_s_h",
+    "llvm.mips.mod.s.w" => "__builtin_msa_mod_s_w",
+    "llvm.mips.mod.u.b" => "__builtin_msa_mod_u_b",
+    "llvm.mips.mod.u.d" => "__builtin_msa_mod_u_d",
+    "llvm.mips.mod.u.h" => "__builtin_msa_mod_u_h",
+    "llvm.mips.mod.u.w" => "__builtin_msa_mod_u_w",
+    "llvm.mips.modsub" => "__builtin_mips_modsub",
+    "llvm.mips.move.v" => "__builtin_msa_move_v",
+    "llvm.mips.msub" => "__builtin_mips_msub",
+    "llvm.mips.msub.q.h" => "__builtin_msa_msub_q_h",
+    "llvm.mips.msub.q.w" => "__builtin_msa_msub_q_w",
+    "llvm.mips.msubr.q.h" => "__builtin_msa_msubr_q_h",
+    "llvm.mips.msubr.q.w" => "__builtin_msa_msubr_q_w",
+    "llvm.mips.msubu" => "__builtin_mips_msubu",
+    "llvm.mips.msubv.b" => "__builtin_msa_msubv_b",
+    "llvm.mips.msubv.d" => "__builtin_msa_msubv_d",
+    "llvm.mips.msubv.h" => "__builtin_msa_msubv_h",
+    "llvm.mips.msubv.w" => "__builtin_msa_msubv_w",
+    "llvm.mips.mthlip" => "__builtin_mips_mthlip",
+    "llvm.mips.mul.ph" => "__builtin_mips_mul_ph",
+    "llvm.mips.mul.q.h" => "__builtin_msa_mul_q_h",
+    "llvm.mips.mul.q.w" => "__builtin_msa_mul_q_w",
+    "llvm.mips.mul.s.ph" => "__builtin_mips_mul_s_ph",
+    "llvm.mips.muleq.s.w.phl" => "__builtin_mips_muleq_s_w_phl",
+    "llvm.mips.muleq.s.w.phr" => "__builtin_mips_muleq_s_w_phr",
+    "llvm.mips.muleu.s.ph.qbl" => "__builtin_mips_muleu_s_ph_qbl",
+    "llvm.mips.muleu.s.ph.qbr" => "__builtin_mips_muleu_s_ph_qbr",
+    "llvm.mips.mulq.rs.ph" => "__builtin_mips_mulq_rs_ph",
+    "llvm.mips.mulq.rs.w" => "__builtin_mips_mulq_rs_w",
+    "llvm.mips.mulq.s.ph" => "__builtin_mips_mulq_s_ph",
+    "llvm.mips.mulq.s.w" => "__builtin_mips_mulq_s_w",
+    "llvm.mips.mulr.q.h" => "__builtin_msa_mulr_q_h",
+    "llvm.mips.mulr.q.w" => "__builtin_msa_mulr_q_w",
+    "llvm.mips.mulsa.w.ph" => "__builtin_mips_mulsa_w_ph",
+    "llvm.mips.mulsaq.s.w.ph" => "__builtin_mips_mulsaq_s_w_ph",
+    "llvm.mips.mult" => "__builtin_mips_mult",
+    "llvm.mips.multu" => "__builtin_mips_multu",
+    "llvm.mips.mulv.b" => "__builtin_msa_mulv_b",
+    "llvm.mips.mulv.d" => "__builtin_msa_mulv_d",
+    "llvm.mips.mulv.h" => "__builtin_msa_mulv_h",
+    "llvm.mips.mulv.w" => "__builtin_msa_mulv_w",
+    "llvm.mips.nloc.b" => "__builtin_msa_nloc_b",
+    "llvm.mips.nloc.d" => "__builtin_msa_nloc_d",
+    "llvm.mips.nloc.h" => "__builtin_msa_nloc_h",
+    "llvm.mips.nloc.w" => "__builtin_msa_nloc_w",
+    "llvm.mips.nlzc.b" => "__builtin_msa_nlzc_b",
+    "llvm.mips.nlzc.d" => "__builtin_msa_nlzc_d",
+    "llvm.mips.nlzc.h" => "__builtin_msa_nlzc_h",
+    "llvm.mips.nlzc.w" => "__builtin_msa_nlzc_w",
+    "llvm.mips.nor.v" => "__builtin_msa_nor_v",
+    "llvm.mips.nori.b" => "__builtin_msa_nori_b",
+    "llvm.mips.or.v" => "__builtin_msa_or_v",
+    "llvm.mips.ori.b" => "__builtin_msa_ori_b",
+    "llvm.mips.packrl.ph" => "__builtin_mips_packrl_ph",
+    "llvm.mips.pckev.b" => "__builtin_msa_pckev_b",
+    "llvm.mips.pckev.d" => "__builtin_msa_pckev_d",
+    "llvm.mips.pckev.h" => "__builtin_msa_pckev_h",
+    "llvm.mips.pckev.w" => "__builtin_msa_pckev_w",
+    "llvm.mips.pckod.b" => "__builtin_msa_pckod_b",
+    "llvm.mips.pckod.d" => "__builtin_msa_pckod_d",
+    "llvm.mips.pckod.h" => "__builtin_msa_pckod_h",
+    "llvm.mips.pckod.w" => "__builtin_msa_pckod_w",
+    "llvm.mips.pcnt.b" => "__builtin_msa_pcnt_b",
+    "llvm.mips.pcnt.d" => "__builtin_msa_pcnt_d",
+    "llvm.mips.pcnt.h" => "__builtin_msa_pcnt_h",
+    "llvm.mips.pcnt.w" => "__builtin_msa_pcnt_w",
+    "llvm.mips.pick.ph" => "__builtin_mips_pick_ph",
+    "llvm.mips.pick.qb" => "__builtin_mips_pick_qb",
+    "llvm.mips.preceq.w.phl" => "__builtin_mips_preceq_w_phl",
+    "llvm.mips.preceq.w.phr" => "__builtin_mips_preceq_w_phr",
+    "llvm.mips.precequ.ph.qbl" => "__builtin_mips_precequ_ph_qbl",
+    "llvm.mips.precequ.ph.qbla" => "__builtin_mips_precequ_ph_qbla",
+    "llvm.mips.precequ.ph.qbr" => "__builtin_mips_precequ_ph_qbr",
+    "llvm.mips.precequ.ph.qbra" => "__builtin_mips_precequ_ph_qbra",
+    "llvm.mips.preceu.ph.qbl" => "__builtin_mips_preceu_ph_qbl",
+    "llvm.mips.preceu.ph.qbla" => "__builtin_mips_preceu_ph_qbla",
+    "llvm.mips.preceu.ph.qbr" => "__builtin_mips_preceu_ph_qbr",
+    "llvm.mips.preceu.ph.qbra" => "__builtin_mips_preceu_ph_qbra",
+    "llvm.mips.precr.qb.ph" => "__builtin_mips_precr_qb_ph",
+    "llvm.mips.precr.sra.ph.w" => "__builtin_mips_precr_sra_ph_w",
+    "llvm.mips.precr.sra.r.ph.w" => "__builtin_mips_precr_sra_r_ph_w",
+    "llvm.mips.precrq.ph.w" => "__builtin_mips_precrq_ph_w",
+    "llvm.mips.precrq.qb.ph" => "__builtin_mips_precrq_qb_ph",
+    "llvm.mips.precrq.rs.ph.w" => "__builtin_mips_precrq_rs_ph_w",
+    "llvm.mips.precrqu.s.qb.ph" => "__builtin_mips_precrqu_s_qb_ph",
+    "llvm.mips.prepend" => "__builtin_mips_prepend",
+    "llvm.mips.raddu.w.qb" => "__builtin_mips_raddu_w_qb",
+    "llvm.mips.rddsp" => "__builtin_mips_rddsp",
+    "llvm.mips.repl.ph" => "__builtin_mips_repl_ph",
+    "llvm.mips.repl.qb" => "__builtin_mips_repl_qb",
+    "llvm.mips.sat.s.b" => "__builtin_msa_sat_s_b",
+    "llvm.mips.sat.s.d" => "__builtin_msa_sat_s_d",
+    "llvm.mips.sat.s.h" => "__builtin_msa_sat_s_h",
+    "llvm.mips.sat.s.w" => "__builtin_msa_sat_s_w",
+    "llvm.mips.sat.u.b" => "__builtin_msa_sat_u_b",
+    "llvm.mips.sat.u.d" => "__builtin_msa_sat_u_d",
+    "llvm.mips.sat.u.h" => "__builtin_msa_sat_u_h",
+    "llvm.mips.sat.u.w" => "__builtin_msa_sat_u_w",
+    "llvm.mips.shf.b" => "__builtin_msa_shf_b",
+    "llvm.mips.shf.h" => "__builtin_msa_shf_h",
+    "llvm.mips.shf.w" => "__builtin_msa_shf_w",
+    "llvm.mips.shilo" => "__builtin_mips_shilo",
+    "llvm.mips.shll.ph" => "__builtin_mips_shll_ph",
+    "llvm.mips.shll.qb" => "__builtin_mips_shll_qb",
+    "llvm.mips.shll.s.ph" => "__builtin_mips_shll_s_ph",
+    "llvm.mips.shll.s.w" => "__builtin_mips_shll_s_w",
+    "llvm.mips.shra.ph" => "__builtin_mips_shra_ph",
+    "llvm.mips.shra.qb" => "__builtin_mips_shra_qb",
+    "llvm.mips.shra.r.ph" => "__builtin_mips_shra_r_ph",
+    "llvm.mips.shra.r.qb" => "__builtin_mips_shra_r_qb",
+    "llvm.mips.shra.r.w" => "__builtin_mips_shra_r_w",
+    "llvm.mips.shrl.ph" => "__builtin_mips_shrl_ph",
+    "llvm.mips.shrl.qb" => "__builtin_mips_shrl_qb",
+    "llvm.mips.sld.b" => "__builtin_msa_sld_b",
+    "llvm.mips.sld.d" => "__builtin_msa_sld_d",
+    "llvm.mips.sld.h" => "__builtin_msa_sld_h",
+    "llvm.mips.sld.w" => "__builtin_msa_sld_w",
+    "llvm.mips.sldi.b" => "__builtin_msa_sldi_b",
+    "llvm.mips.sldi.d" => "__builtin_msa_sldi_d",
+    "llvm.mips.sldi.h" => "__builtin_msa_sldi_h",
+    "llvm.mips.sldi.w" => "__builtin_msa_sldi_w",
+    "llvm.mips.sll.b" => "__builtin_msa_sll_b",
+    "llvm.mips.sll.d" => "__builtin_msa_sll_d",
+    "llvm.mips.sll.h" => "__builtin_msa_sll_h",
+    "llvm.mips.sll.w" => "__builtin_msa_sll_w",
+    "llvm.mips.slli.b" => "__builtin_msa_slli_b",
+    "llvm.mips.slli.d" => "__builtin_msa_slli_d",
+    "llvm.mips.slli.h" => "__builtin_msa_slli_h",
+    "llvm.mips.slli.w" => "__builtin_msa_slli_w",
+    "llvm.mips.splat.b" => "__builtin_msa_splat_b",
+    "llvm.mips.splat.d" => "__builtin_msa_splat_d",
+    "llvm.mips.splat.h" => "__builtin_msa_splat_h",
+    "llvm.mips.splat.w" => "__builtin_msa_splat_w",
+    "llvm.mips.splati.b" => "__builtin_msa_splati_b",
+    "llvm.mips.splati.d" => "__builtin_msa_splati_d",
+    "llvm.mips.splati.h" => "__builtin_msa_splati_h",
+    "llvm.mips.splati.w" => "__builtin_msa_splati_w",
+    "llvm.mips.sra.b" => "__builtin_msa_sra_b",
+    "llvm.mips.sra.d" => "__builtin_msa_sra_d",
+    "llvm.mips.sra.h" => "__builtin_msa_sra_h",
+    "llvm.mips.sra.w" => "__builtin_msa_sra_w",
+    "llvm.mips.srai.b" => "__builtin_msa_srai_b",
+    "llvm.mips.srai.d" => "__builtin_msa_srai_d",
+    "llvm.mips.srai.h" => "__builtin_msa_srai_h",
+    "llvm.mips.srai.w" => "__builtin_msa_srai_w",
+    "llvm.mips.srar.b" => "__builtin_msa_srar_b",
+    "llvm.mips.srar.d" => "__builtin_msa_srar_d",
+    "llvm.mips.srar.h" => "__builtin_msa_srar_h",
+    "llvm.mips.srar.w" => "__builtin_msa_srar_w",
+    "llvm.mips.srari.b" => "__builtin_msa_srari_b",
+    "llvm.mips.srari.d" => "__builtin_msa_srari_d",
+    "llvm.mips.srari.h" => "__builtin_msa_srari_h",
+    "llvm.mips.srari.w" => "__builtin_msa_srari_w",
+    "llvm.mips.srl.b" => "__builtin_msa_srl_b",
+    "llvm.mips.srl.d" => "__builtin_msa_srl_d",
+    "llvm.mips.srl.h" => "__builtin_msa_srl_h",
+    "llvm.mips.srl.w" => "__builtin_msa_srl_w",
+    "llvm.mips.srli.b" => "__builtin_msa_srli_b",
+    "llvm.mips.srli.d" => "__builtin_msa_srli_d",
+    "llvm.mips.srli.h" => "__builtin_msa_srli_h",
+    "llvm.mips.srli.w" => "__builtin_msa_srli_w",
+    "llvm.mips.srlr.b" => "__builtin_msa_srlr_b",
+    "llvm.mips.srlr.d" => "__builtin_msa_srlr_d",
+    "llvm.mips.srlr.h" => "__builtin_msa_srlr_h",
+    "llvm.mips.srlr.w" => "__builtin_msa_srlr_w",
+    "llvm.mips.srlri.b" => "__builtin_msa_srlri_b",
+    "llvm.mips.srlri.d" => "__builtin_msa_srlri_d",
+    "llvm.mips.srlri.h" => "__builtin_msa_srlri_h",
+    "llvm.mips.srlri.w" => "__builtin_msa_srlri_w",
+    "llvm.mips.st.b" => "__builtin_msa_st_b",
+    "llvm.mips.st.d" => "__builtin_msa_st_d",
+    "llvm.mips.st.h" => "__builtin_msa_st_h",
+    "llvm.mips.st.w" => "__builtin_msa_st_w",
+    "llvm.mips.str.d" => "__builtin_msa_str_d",
+    "llvm.mips.str.w" => "__builtin_msa_str_w",
+    "llvm.mips.subq.ph" => "__builtin_mips_subq_ph",
+    "llvm.mips.subq.s.ph" => "__builtin_mips_subq_s_ph",
+    "llvm.mips.subq.s.w" => "__builtin_mips_subq_s_w",
+    "llvm.mips.subqh.ph" => "__builtin_mips_subqh_ph",
+    "llvm.mips.subqh.r.ph" => "__builtin_mips_subqh_r_ph",
+    "llvm.mips.subqh.r.w" => "__builtin_mips_subqh_r_w",
+    "llvm.mips.subqh.w" => "__builtin_mips_subqh_w",
+    "llvm.mips.subs.s.b" => "__builtin_msa_subs_s_b",
+    "llvm.mips.subs.s.d" => "__builtin_msa_subs_s_d",
+    "llvm.mips.subs.s.h" => "__builtin_msa_subs_s_h",
+    "llvm.mips.subs.s.w" => "__builtin_msa_subs_s_w",
+    "llvm.mips.subs.u.b" => "__builtin_msa_subs_u_b",
+    "llvm.mips.subs.u.d" => "__builtin_msa_subs_u_d",
+    "llvm.mips.subs.u.h" => "__builtin_msa_subs_u_h",
+    "llvm.mips.subs.u.w" => "__builtin_msa_subs_u_w",
+    "llvm.mips.subsus.u.b" => "__builtin_msa_subsus_u_b",
+    "llvm.mips.subsus.u.d" => "__builtin_msa_subsus_u_d",
+    "llvm.mips.subsus.u.h" => "__builtin_msa_subsus_u_h",
+    "llvm.mips.subsus.u.w" => "__builtin_msa_subsus_u_w",
+    "llvm.mips.subsuu.s.b" => "__builtin_msa_subsuu_s_b",
+    "llvm.mips.subsuu.s.d" => "__builtin_msa_subsuu_s_d",
+    "llvm.mips.subsuu.s.h" => "__builtin_msa_subsuu_s_h",
+    "llvm.mips.subsuu.s.w" => "__builtin_msa_subsuu_s_w",
+    "llvm.mips.subu.ph" => "__builtin_mips_subu_ph",
+    "llvm.mips.subu.qb" => "__builtin_mips_subu_qb",
+    "llvm.mips.subu.s.ph" => "__builtin_mips_subu_s_ph",
+    "llvm.mips.subu.s.qb" => "__builtin_mips_subu_s_qb",
+    "llvm.mips.subuh.qb" => "__builtin_mips_subuh_qb",
+    "llvm.mips.subuh.r.qb" => "__builtin_mips_subuh_r_qb",
+    "llvm.mips.subv.b" => "__builtin_msa_subv_b",
+    "llvm.mips.subv.d" => "__builtin_msa_subv_d",
+    "llvm.mips.subv.h" => "__builtin_msa_subv_h",
+    "llvm.mips.subv.w" => "__builtin_msa_subv_w",
+    "llvm.mips.subvi.b" => "__builtin_msa_subvi_b",
+    "llvm.mips.subvi.d" => "__builtin_msa_subvi_d",
+    "llvm.mips.subvi.h" => "__builtin_msa_subvi_h",
+    "llvm.mips.subvi.w" => "__builtin_msa_subvi_w",
+    "llvm.mips.vshf.b" => "__builtin_msa_vshf_b",
+    "llvm.mips.vshf.d" => "__builtin_msa_vshf_d",
+    "llvm.mips.vshf.h" => "__builtin_msa_vshf_h",
+    "llvm.mips.vshf.w" => "__builtin_msa_vshf_w",
+    "llvm.mips.wrdsp" => "__builtin_mips_wrdsp",
+    "llvm.mips.xor.v" => "__builtin_msa_xor_v",
+    "llvm.mips.xori.b" => "__builtin_msa_xori_b",
+    // nvvm
+    "llvm.nvvm.abs.bf16" => "__nvvm_abs_bf16",
+    "llvm.nvvm.abs.bf16x2" => "__nvvm_abs_bf16x2",
+    "llvm.nvvm.abs.i" => "__nvvm_abs_i",
+    "llvm.nvvm.abs.ll" => "__nvvm_abs_ll",
+    "llvm.nvvm.activemask" => "__nvvm_activemask",
+    "llvm.nvvm.add.rm.d" => "__nvvm_add_rm_d",
+    "llvm.nvvm.add.rm.f" => "__nvvm_add_rm_f",
+    "llvm.nvvm.add.rm.ftz.f" => "__nvvm_add_rm_ftz_f",
+    "llvm.nvvm.add.rn.d" => "__nvvm_add_rn_d",
+    "llvm.nvvm.add.rn.f" => "__nvvm_add_rn_f",
+    "llvm.nvvm.add.rn.ftz.f" => "__nvvm_add_rn_ftz_f",
+    "llvm.nvvm.add.rp.d" => "__nvvm_add_rp_d",
+    "llvm.nvvm.add.rp.f" => "__nvvm_add_rp_f",
+    "llvm.nvvm.add.rp.ftz.f" => "__nvvm_add_rp_ftz_f",
+    "llvm.nvvm.add.rz.d" => "__nvvm_add_rz_d",
+    "llvm.nvvm.add.rz.f" => "__nvvm_add_rz_f",
+    "llvm.nvvm.add.rz.ftz.f" => "__nvvm_add_rz_ftz_f",
+    "llvm.nvvm.bar.sync" => "__nvvm_bar_sync",
+    "llvm.nvvm.bar.warp.sync" => "__nvvm_bar_warp_sync",
+    "llvm.nvvm.barrier" => "__nvvm_bar",
+    "llvm.nvvm.barrier.n" => "__nvvm_bar_n",
+    "llvm.nvvm.barrier.sync" => "__nvvm_barrier_sync",
+    "llvm.nvvm.barrier.sync.cnt" => "__nvvm_barrier_sync_cnt",
+    "llvm.nvvm.barrier0" => "__syncthreads",
+    // [DUPLICATE]: "llvm.nvvm.barrier0" => "__nvvm_bar0",
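+    // A `[DUPLICATE]` comment marks an LLVM intrinsic that would map to more
+    // than one GCC builtin: the key repeats the live arm directly above it, and
+    // the alternative mapping is left commented out so the `match` keeps a
+    // single reachable arm per intrinsic name.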
+    "llvm.nvvm.barrier0.and" => "__nvvm_bar0_and",
+    "llvm.nvvm.barrier0.or" => "__nvvm_bar0_or",
+    "llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc",
+    "llvm.nvvm.bf2h.rn" => "__nvvm_bf2h_rn",
+    "llvm.nvvm.bf2h.rn.ftz" => "__nvvm_bf2h_rn_ftz",
+    "llvm.nvvm.bitcast.d2ll" => "__nvvm_bitcast_d2ll",
+    "llvm.nvvm.bitcast.f2i" => "__nvvm_bitcast_f2i",
+    "llvm.nvvm.bitcast.i2f" => "__nvvm_bitcast_i2f",
+    "llvm.nvvm.bitcast.ll2d" => "__nvvm_bitcast_ll2d",
+    "llvm.nvvm.brev32" => "__nvvm_brev32",
+    "llvm.nvvm.brev64" => "__nvvm_brev64",
+    "llvm.nvvm.ceil.d" => "__nvvm_ceil_d",
+    "llvm.nvvm.ceil.f" => "__nvvm_ceil_f",
+    "llvm.nvvm.ceil.ftz.f" => "__nvvm_ceil_ftz_f",
+    "llvm.nvvm.clz.i" => "__nvvm_clz_i",
+    "llvm.nvvm.clz.ll" => "__nvvm_clz_ll",
+    "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f",
+    "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f",
+    "llvm.nvvm.cp.async.commit.group" => "__nvvm_cp_async_commit_group",
+    "llvm.nvvm.cp.async.mbarrier.arrive" => "__nvvm_cp_async_mbarrier_arrive",
+    "llvm.nvvm.cp.async.mbarrier.arrive.noinc" => "__nvvm_cp_async_mbarrier_arrive_noinc",
+    "llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared" => "__nvvm_cp_async_mbarrier_arrive_noinc_shared",
+    "llvm.nvvm.cp.async.mbarrier.arrive.shared" => "__nvvm_cp_async_mbarrier_arrive_shared",
+    "llvm.nvvm.cp.async.wait.all" => "__nvvm_cp_async_wait_all",
+    "llvm.nvvm.cp.async.wait.group" => "__nvvm_cp_async_wait_group",
+    "llvm.nvvm.d2f.rm" => "__nvvm_d2f_rm",
+    "llvm.nvvm.d2f.rm.ftz" => "__nvvm_d2f_rm_ftz",
+    "llvm.nvvm.d2f.rn" => "__nvvm_d2f_rn",
+    "llvm.nvvm.d2f.rn.ftz" => "__nvvm_d2f_rn_ftz",
+    "llvm.nvvm.d2f.rp" => "__nvvm_d2f_rp",
+    "llvm.nvvm.d2f.rp.ftz" => "__nvvm_d2f_rp_ftz",
+    "llvm.nvvm.d2f.rz" => "__nvvm_d2f_rz",
+    "llvm.nvvm.d2f.rz.ftz" => "__nvvm_d2f_rz_ftz",
+    "llvm.nvvm.d2i.hi" => "__nvvm_d2i_hi",
+    "llvm.nvvm.d2i.lo" => "__nvvm_d2i_lo",
+    "llvm.nvvm.d2i.rm" => "__nvvm_d2i_rm",
+    "llvm.nvvm.d2i.rn" => "__nvvm_d2i_rn",
+    "llvm.nvvm.d2i.rp" => "__nvvm_d2i_rp",
+    "llvm.nvvm.d2i.rz" => "__nvvm_d2i_rz",
+    "llvm.nvvm.d2ll.rm" => "__nvvm_d2ll_rm",
+    "llvm.nvvm.d2ll.rn" => "__nvvm_d2ll_rn",
+    "llvm.nvvm.d2ll.rp" => "__nvvm_d2ll_rp",
+    "llvm.nvvm.d2ll.rz" => "__nvvm_d2ll_rz",
+    "llvm.nvvm.d2ui.rm" => "__nvvm_d2ui_rm",
+    "llvm.nvvm.d2ui.rn" => "__nvvm_d2ui_rn",
+    "llvm.nvvm.d2ui.rp" => "__nvvm_d2ui_rp",
+    "llvm.nvvm.d2ui.rz" => "__nvvm_d2ui_rz",
+    "llvm.nvvm.d2ull.rm" => "__nvvm_d2ull_rm",
+    "llvm.nvvm.d2ull.rn" => "__nvvm_d2ull_rn",
+    "llvm.nvvm.d2ull.rp" => "__nvvm_d2ull_rp",
+    "llvm.nvvm.d2ull.rz" => "__nvvm_d2ull_rz",
+    "llvm.nvvm.div.approx.f" => "__nvvm_div_approx_f",
+    "llvm.nvvm.div.approx.ftz.f" => "__nvvm_div_approx_ftz_f",
+    "llvm.nvvm.div.rm.d" => "__nvvm_div_rm_d",
+    "llvm.nvvm.div.rm.f" => "__nvvm_div_rm_f",
+    "llvm.nvvm.div.rm.ftz.f" => "__nvvm_div_rm_ftz_f",
+    "llvm.nvvm.div.rn.d" => "__nvvm_div_rn_d",
+    "llvm.nvvm.div.rn.f" => "__nvvm_div_rn_f",
+    "llvm.nvvm.div.rn.ftz.f" => "__nvvm_div_rn_ftz_f",
+    "llvm.nvvm.div.rp.d" => "__nvvm_div_rp_d",
+    "llvm.nvvm.div.rp.f" => "__nvvm_div_rp_f",
+    "llvm.nvvm.div.rp.ftz.f" => "__nvvm_div_rp_ftz_f",
+    "llvm.nvvm.div.rz.d" => "__nvvm_div_rz_d",
+    "llvm.nvvm.div.rz.f" => "__nvvm_div_rz_f",
+    "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f",
+    "llvm.nvvm.e4m3x2.to.f16x2.rn" => "__nvvm_e4m3x2_to_f16x2_rn",
+    "llvm.nvvm.e4m3x2.to.f16x2.rn.relu" => "__nvvm_e4m3x2_to_f16x2_rn_relu",
+    "llvm.nvvm.e5m2x2.to.f16x2.rn" => "__nvvm_e5m2x2_to_f16x2_rn",
+    "llvm.nvvm.e5m2x2.to.f16x2.rn.relu" => "__nvvm_e5m2x2_to_f16x2_rn_relu",
+    "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d",
+    "llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f",
+    "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f",
+    "llvm.nvvm.exit" => "__nvvm_exit",
+    "llvm.nvvm.f16x2.to.e4m3x2.rn" => "__nvvm_f16x2_to_e4m3x2_rn",
+    "llvm.nvvm.f16x2.to.e4m3x2.rn.relu" => "__nvvm_f16x2_to_e4m3x2_rn_relu",
+    "llvm.nvvm.f16x2.to.e5m2x2.rn" => "__nvvm_f16x2_to_e5m2x2_rn",
+    "llvm.nvvm.f16x2.to.e5m2x2.rn.relu" => "__nvvm_f16x2_to_e5m2x2_rn_relu",
+    "llvm.nvvm.f2bf16.rn" => "__nvvm_f2bf16_rn",
+    "llvm.nvvm.f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu",
+    "llvm.nvvm.f2bf16.rz" => "__nvvm_f2bf16_rz",
+    "llvm.nvvm.f2bf16.rz.relu" => "__nvvm_f2bf16_rz_relu",
+    "llvm.nvvm.f2h.rn" => "__nvvm_f2h_rn",
+    "llvm.nvvm.f2h.rn.ftz" => "__nvvm_f2h_rn_ftz",
+    "llvm.nvvm.f2i.rm" => "__nvvm_f2i_rm",
+    "llvm.nvvm.f2i.rm.ftz" => "__nvvm_f2i_rm_ftz",
+    "llvm.nvvm.f2i.rn" => "__nvvm_f2i_rn",
+    "llvm.nvvm.f2i.rn.ftz" => "__nvvm_f2i_rn_ftz",
+    "llvm.nvvm.f2i.rp" => "__nvvm_f2i_rp",
+    "llvm.nvvm.f2i.rp.ftz" => "__nvvm_f2i_rp_ftz",
+    "llvm.nvvm.f2i.rz" => "__nvvm_f2i_rz",
+    "llvm.nvvm.f2i.rz.ftz" => "__nvvm_f2i_rz_ftz",
+    "llvm.nvvm.f2ll.rm" => "__nvvm_f2ll_rm",
+    "llvm.nvvm.f2ll.rm.ftz" => "__nvvm_f2ll_rm_ftz",
+    "llvm.nvvm.f2ll.rn" => "__nvvm_f2ll_rn",
+    "llvm.nvvm.f2ll.rn.ftz" => "__nvvm_f2ll_rn_ftz",
+    "llvm.nvvm.f2ll.rp" => "__nvvm_f2ll_rp",
+    "llvm.nvvm.f2ll.rp.ftz" => "__nvvm_f2ll_rp_ftz",
+    "llvm.nvvm.f2ll.rz" => "__nvvm_f2ll_rz",
+    "llvm.nvvm.f2ll.rz.ftz" => "__nvvm_f2ll_rz_ftz",
+    "llvm.nvvm.f2tf32.rna" => "__nvvm_f2tf32_rna",
+    "llvm.nvvm.f2ui.rm" => "__nvvm_f2ui_rm",
+    "llvm.nvvm.f2ui.rm.ftz" => "__nvvm_f2ui_rm_ftz",
+    "llvm.nvvm.f2ui.rn" => "__nvvm_f2ui_rn",
+    "llvm.nvvm.f2ui.rn.ftz" => "__nvvm_f2ui_rn_ftz",
+    "llvm.nvvm.f2ui.rp" => "__nvvm_f2ui_rp",
+    "llvm.nvvm.f2ui.rp.ftz" => "__nvvm_f2ui_rp_ftz",
+    "llvm.nvvm.f2ui.rz" => "__nvvm_f2ui_rz",
+    "llvm.nvvm.f2ui.rz.ftz" => "__nvvm_f2ui_rz_ftz",
+    "llvm.nvvm.f2ull.rm" => "__nvvm_f2ull_rm",
+    "llvm.nvvm.f2ull.rm.ftz" => "__nvvm_f2ull_rm_ftz",
+    "llvm.nvvm.f2ull.rn" => "__nvvm_f2ull_rn",
+    "llvm.nvvm.f2ull.rn.ftz" => "__nvvm_f2ull_rn_ftz",
+    "llvm.nvvm.f2ull.rp" => "__nvvm_f2ull_rp",
+    "llvm.nvvm.f2ull.rp.ftz" => "__nvvm_f2ull_rp_ftz",
+    "llvm.nvvm.f2ull.rz" => "__nvvm_f2ull_rz",
+    "llvm.nvvm.f2ull.rz.ftz" => "__nvvm_f2ull_rz_ftz",
+    "llvm.nvvm.fabs.d" => "__nvvm_fabs_d",
+    "llvm.nvvm.fabs.f" => "__nvvm_fabs_f",
+    "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f",
+    "llvm.nvvm.ff.to.e4m3x2.rn" => "__nvvm_ff_to_e4m3x2_rn",
+    "llvm.nvvm.ff.to.e4m3x2.rn.relu" => "__nvvm_ff_to_e4m3x2_rn_relu",
+    "llvm.nvvm.ff.to.e5m2x2.rn" => "__nvvm_ff_to_e5m2x2_rn",
+    "llvm.nvvm.ff.to.e5m2x2.rn.relu" => "__nvvm_ff_to_e5m2x2_rn_relu",
+    "llvm.nvvm.ff2bf16x2.rn" => "__nvvm_ff2bf16x2_rn",
+    "llvm.nvvm.ff2bf16x2.rn.relu" => "__nvvm_ff2bf16x2_rn_relu",
+    "llvm.nvvm.ff2bf16x2.rz" => "__nvvm_ff2bf16x2_rz",
+    "llvm.nvvm.ff2bf16x2.rz.relu" => "__nvvm_ff2bf16x2_rz_relu",
+    "llvm.nvvm.ff2f16x2.rn" => "__nvvm_ff2f16x2_rn",
+    "llvm.nvvm.ff2f16x2.rn.relu" => "__nvvm_ff2f16x2_rn_relu",
+    "llvm.nvvm.ff2f16x2.rz" => "__nvvm_ff2f16x2_rz",
+    "llvm.nvvm.ff2f16x2.rz.relu" => "__nvvm_ff2f16x2_rz_relu",
+    "llvm.nvvm.floor.d" => "__nvvm_floor_d",
+    "llvm.nvvm.floor.f" => "__nvvm_floor_f",
+    "llvm.nvvm.floor.ftz.f" => "__nvvm_floor_ftz_f",
+    "llvm.nvvm.fma.rm.d" => "__nvvm_fma_rm_d",
+    "llvm.nvvm.fma.rm.f" => "__nvvm_fma_rm_f",
+    "llvm.nvvm.fma.rm.ftz.f" => "__nvvm_fma_rm_ftz_f",
+    "llvm.nvvm.fma.rn.bf16" => "__nvvm_fma_rn_bf16",
+    "llvm.nvvm.fma.rn.bf16x2" => "__nvvm_fma_rn_bf16x2",
+    "llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d",
+    "llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f",
+    "llvm.nvvm.fma.rn.ftz.bf16" => "__nvvm_fma_rn_ftz_bf16",
+    "llvm.nvvm.fma.rn.ftz.bf16x2" => "__nvvm_fma_rn_ftz_bf16x2",
+    "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f",
+    "llvm.nvvm.fma.rn.ftz.relu.bf16" => "__nvvm_fma_rn_ftz_relu_bf16",
+    "llvm.nvvm.fma.rn.ftz.relu.bf16x2" => "__nvvm_fma_rn_ftz_relu_bf16x2",
+    "llvm.nvvm.fma.rn.ftz.sat.bf16" => "__nvvm_fma_rn_ftz_sat_bf16",
+    "llvm.nvvm.fma.rn.ftz.sat.bf16x2" => "__nvvm_fma_rn_ftz_sat_bf16x2",
+    "llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16",
+    "llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2",
+    "llvm.nvvm.fma.rn.sat.bf16" => "__nvvm_fma_rn_sat_bf16",
+    "llvm.nvvm.fma.rn.sat.bf16x2" => "__nvvm_fma_rn_sat_bf16x2",
+    "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d",
+    "llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f",
+    "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f",
+    "llvm.nvvm.fma.rz.d" => "__nvvm_fma_rz_d",
+    "llvm.nvvm.fma.rz.f" => "__nvvm_fma_rz_f",
+    "llvm.nvvm.fma.rz.ftz.f" => "__nvvm_fma_rz_ftz_f",
+    "llvm.nvvm.fmax.bf16" => "__nvvm_fmax_bf16",
+    "llvm.nvvm.fmax.bf16x2" => "__nvvm_fmax_bf16x2",
+    "llvm.nvvm.fmax.d" => "__nvvm_fmax_d",
+    "llvm.nvvm.fmax.f" => "__nvvm_fmax_f",
+    "llvm.nvvm.fmax.ftz.bf16" => "__nvvm_fmax_ftz_bf16",
+    "llvm.nvvm.fmax.ftz.bf16x2" => "__nvvm_fmax_ftz_bf16x2",
+    "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f",
+    "llvm.nvvm.fmax.ftz.nan.bf16" => "__nvvm_fmax_ftz_nan_bf16",
+    "llvm.nvvm.fmax.ftz.nan.bf16x2" => "__nvvm_fmax_ftz_nan_bf16x2",
+    "llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f",
+    "llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16" => "__nvvm_fmax_ftz_nan_xorsign_abs_bf16",
+    "llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f",
+    "llvm.nvvm.fmax.ftz.xorsign.abs.bf16" => "__nvvm_fmax_ftz_xorsign_abs_bf16",
+    "llvm.nvvm.fmax.ftz.xorsign.abs.bf16x2" => "__nvvm_fmax_ftz_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f",
+    "llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16",
+    "llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2",
+    "llvm.nvvm.fmax.nan.f" => "__nvvm_fmax_nan_f",
+    "llvm.nvvm.fmax.nan.xorsign.abs.bf16" => "__nvvm_fmax_nan_xorsign_abs_bf16",
+    "llvm.nvvm.fmax.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_nan_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmax.nan.xorsign.abs.f" => "__nvvm_fmax_nan_xorsign_abs_f",
+    "llvm.nvvm.fmax.xorsign.abs.bf16" => "__nvvm_fmax_xorsign_abs_bf16",
+    "llvm.nvvm.fmax.xorsign.abs.bf16x2" => "__nvvm_fmax_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmax.xorsign.abs.f" => "__nvvm_fmax_xorsign_abs_f",
+    "llvm.nvvm.fmin.bf16" => "__nvvm_fmin_bf16",
+    "llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2",
+    "llvm.nvvm.fmin.d" => "__nvvm_fmin_d",
+    "llvm.nvvm.fmin.f" => "__nvvm_fmin_f",
+    "llvm.nvvm.fmin.ftz.bf16" => "__nvvm_fmin_ftz_bf16",
+    "llvm.nvvm.fmin.ftz.bf16x2" => "__nvvm_fmin_ftz_bf16x2",
+    "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f",
+    "llvm.nvvm.fmin.ftz.nan.bf16" => "__nvvm_fmin_ftz_nan_bf16",
+    "llvm.nvvm.fmin.ftz.nan.bf16x2" => "__nvvm_fmin_ftz_nan_bf16x2",
+    "llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f",
+    "llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16" => "__nvvm_fmin_ftz_nan_xorsign_abs_bf16",
+    "llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f",
+    "llvm.nvvm.fmin.ftz.xorsign.abs.bf16" => "__nvvm_fmin_ftz_xorsign_abs_bf16",
+    "llvm.nvvm.fmin.ftz.xorsign.abs.bf16x2" => "__nvvm_fmin_ftz_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f",
+    "llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16",
+    "llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2",
+    "llvm.nvvm.fmin.nan.f" => "__nvvm_fmin_nan_f",
+    "llvm.nvvm.fmin.nan.xorsign.abs.bf16" => "__nvvm_fmin_nan_xorsign_abs_bf16",
+    "llvm.nvvm.fmin.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_nan_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmin.nan.xorsign.abs.f" => "__nvvm_fmin_nan_xorsign_abs_f",
+    "llvm.nvvm.fmin.xorsign.abs.bf16" => "__nvvm_fmin_xorsign_abs_bf16",
+    "llvm.nvvm.fmin.xorsign.abs.bf16x2" => "__nvvm_fmin_xorsign_abs_bf16x2",
+    "llvm.nvvm.fmin.xorsign.abs.f" => "__nvvm_fmin_xorsign_abs_f",
+    "llvm.nvvm.fns" => "__nvvm_fns",
+    "llvm.nvvm.h2f" => "__nvvm_h2f",
+    "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm",
+    "llvm.nvvm.i2d.rn" => "__nvvm_i2d_rn",
+    "llvm.nvvm.i2d.rp" => "__nvvm_i2d_rp",
+    "llvm.nvvm.i2d.rz" => "__nvvm_i2d_rz",
+    "llvm.nvvm.i2f.rm" => "__nvvm_i2f_rm",
+    "llvm.nvvm.i2f.rn" => "__nvvm_i2f_rn",
+    "llvm.nvvm.i2f.rp" => "__nvvm_i2f_rp",
+    "llvm.nvvm.i2f.rz" => "__nvvm_i2f_rz",
+    "llvm.nvvm.isspacep.const" => "__nvvm_isspacep_const",
+    "llvm.nvvm.isspacep.global" => "__nvvm_isspacep_global",
+    "llvm.nvvm.isspacep.local" => "__nvvm_isspacep_local",
+    "llvm.nvvm.isspacep.shared" => "__nvvm_isspacep_shared",
+    "llvm.nvvm.istypep.sampler" => "__nvvm_istypep_sampler",
+    "llvm.nvvm.istypep.surface" => "__nvvm_istypep_surface",
+    "llvm.nvvm.istypep.texture" => "__nvvm_istypep_texture",
+    "llvm.nvvm.lg2.approx.d" => "__nvvm_lg2_approx_d",
+    "llvm.nvvm.lg2.approx.f" => "__nvvm_lg2_approx_f",
+    "llvm.nvvm.lg2.approx.ftz.f" => "__nvvm_lg2_approx_ftz_f",
+    "llvm.nvvm.ll2d.rm" => "__nvvm_ll2d_rm",
+    "llvm.nvvm.ll2d.rn" => "__nvvm_ll2d_rn",
+    "llvm.nvvm.ll2d.rp" => "__nvvm_ll2d_rp",
+    "llvm.nvvm.ll2d.rz" => "__nvvm_ll2d_rz",
+    "llvm.nvvm.ll2f.rm" => "__nvvm_ll2f_rm",
+    "llvm.nvvm.ll2f.rn" => "__nvvm_ll2f_rn",
+    "llvm.nvvm.ll2f.rp" => "__nvvm_ll2f_rp",
+    "llvm.nvvm.ll2f.rz" => "__nvvm_ll2f_rz",
+    "llvm.nvvm.lohi.i2d" => "__nvvm_lohi_i2d",
+    "llvm.nvvm.match.any.sync.i32" => "__nvvm_match_any_sync_i32",
+    "llvm.nvvm.match.any.sync.i64" => "__nvvm_match_any_sync_i64",
+    "llvm.nvvm.max.i" => "__nvvm_max_i",
+    "llvm.nvvm.max.ll" => "__nvvm_max_ll",
+    "llvm.nvvm.max.ui" => "__nvvm_max_ui",
+    "llvm.nvvm.max.ull" => "__nvvm_max_ull",
+    "llvm.nvvm.mbarrier.arrive" => "__nvvm_mbarrier_arrive",
+    "llvm.nvvm.mbarrier.arrive.drop" => "__nvvm_mbarrier_arrive_drop",
+    "llvm.nvvm.mbarrier.arrive.drop.noComplete" => "__nvvm_mbarrier_arrive_drop_noComplete",
+    "llvm.nvvm.mbarrier.arrive.drop.noComplete.shared" => "__nvvm_mbarrier_arrive_drop_noComplete_shared",
+    "llvm.nvvm.mbarrier.arrive.drop.shared" => "__nvvm_mbarrier_arrive_drop_shared",
+    "llvm.nvvm.mbarrier.arrive.noComplete" => "__nvvm_mbarrier_arrive_noComplete",
+    "llvm.nvvm.mbarrier.arrive.noComplete.shared" => "__nvvm_mbarrier_arrive_noComplete_shared",
+    "llvm.nvvm.mbarrier.arrive.shared" => "__nvvm_mbarrier_arrive_shared",
+    "llvm.nvvm.mbarrier.init" => "__nvvm_mbarrier_init",
+    "llvm.nvvm.mbarrier.init.shared" => "__nvvm_mbarrier_init_shared",
+    "llvm.nvvm.mbarrier.inval" => "__nvvm_mbarrier_inval",
+    "llvm.nvvm.mbarrier.inval.shared" => "__nvvm_mbarrier_inval_shared",
+    "llvm.nvvm.mbarrier.pending.count" => "__nvvm_mbarrier_pending_count",
+    "llvm.nvvm.mbarrier.test.wait" => "__nvvm_mbarrier_test_wait",
+    "llvm.nvvm.mbarrier.test.wait.shared" => "__nvvm_mbarrier_test_wait_shared",
+    "llvm.nvvm.membar.cta" => "__nvvm_membar_cta",
+    "llvm.nvvm.membar.gl" => "__nvvm_membar_gl",
+    "llvm.nvvm.membar.sys" => "__nvvm_membar_sys",
+    "llvm.nvvm.min.i" => "__nvvm_min_i",
+    "llvm.nvvm.min.ll" => "__nvvm_min_ll",
+    "llvm.nvvm.min.ui" => "__nvvm_min_ui",
+    "llvm.nvvm.min.ull" => "__nvvm_min_ull",
+    "llvm.nvvm.mul.rm.d" => "__nvvm_mul_rm_d",
+    "llvm.nvvm.mul.rm.f" => "__nvvm_mul_rm_f",
+    "llvm.nvvm.mul.rm.ftz.f" => "__nvvm_mul_rm_ftz_f",
+    "llvm.nvvm.mul.rn.d" => "__nvvm_mul_rn_d",
+    "llvm.nvvm.mul.rn.f" => "__nvvm_mul_rn_f",
+    "llvm.nvvm.mul.rn.ftz.f" => "__nvvm_mul_rn_ftz_f",
+    "llvm.nvvm.mul.rp.d" => "__nvvm_mul_rp_d",
+    "llvm.nvvm.mul.rp.f" => "__nvvm_mul_rp_f",
+    "llvm.nvvm.mul.rp.ftz.f" => "__nvvm_mul_rp_ftz_f",
+    "llvm.nvvm.mul.rz.d" => "__nvvm_mul_rz_d",
+    "llvm.nvvm.mul.rz.f" => "__nvvm_mul_rz_f",
+    "llvm.nvvm.mul.rz.ftz.f" => "__nvvm_mul_rz_ftz_f",
+    "llvm.nvvm.mul24.i" => "__nvvm_mul24_i",
+    "llvm.nvvm.mul24.ui" => "__nvvm_mul24_ui",
+    "llvm.nvvm.mulhi.i" => "__nvvm_mulhi_i",
+    "llvm.nvvm.mulhi.ll" => "__nvvm_mulhi_ll",
+    "llvm.nvvm.mulhi.s" => "__nvvm_mulhi_s",
+    "llvm.nvvm.mulhi.ui" => "__nvvm_mulhi_ui",
+    "llvm.nvvm.mulhi.ull" => "__nvvm_mulhi_ull",
+    "llvm.nvvm.mulhi.us" => "__nvvm_mulhi_us",
+    "llvm.nvvm.nanosleep" => "__nvvm_nanosleep",
+    "llvm.nvvm.neg.bf16" => "__nvvm_neg_bf16",
+    "llvm.nvvm.neg.bf16x2" => "__nvvm_neg_bf16x2",
+    "llvm.nvvm.popc.i" => "__nvvm_popc_i",
+    "llvm.nvvm.popc.ll" => "__nvvm_popc_ll",
+    "llvm.nvvm.prmt" => "__nvvm_prmt",
+    "llvm.nvvm.rcp.approx.ftz.d" => "__nvvm_rcp_approx_ftz_d",
+    "llvm.nvvm.rcp.approx.ftz.f" => "__nvvm_rcp_approx_ftz_f",
+    "llvm.nvvm.rcp.rm.d" => "__nvvm_rcp_rm_d",
+    "llvm.nvvm.rcp.rm.f" => "__nvvm_rcp_rm_f",
+    "llvm.nvvm.rcp.rm.ftz.f" => "__nvvm_rcp_rm_ftz_f",
+    "llvm.nvvm.rcp.rn.d" => "__nvvm_rcp_rn_d",
+    "llvm.nvvm.rcp.rn.f" => "__nvvm_rcp_rn_f",
+    "llvm.nvvm.rcp.rn.ftz.f" => "__nvvm_rcp_rn_ftz_f",
+    "llvm.nvvm.rcp.rp.d" => "__nvvm_rcp_rp_d",
+    "llvm.nvvm.rcp.rp.f" => "__nvvm_rcp_rp_f",
+    "llvm.nvvm.rcp.rp.ftz.f" => "__nvvm_rcp_rp_ftz_f",
+    "llvm.nvvm.rcp.rz.d" => "__nvvm_rcp_rz_d",
+    "llvm.nvvm.rcp.rz.f" => "__nvvm_rcp_rz_f",
+    "llvm.nvvm.rcp.rz.ftz.f" => "__nvvm_rcp_rz_ftz_f",
+    "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_clock",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_clock64",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.ctaid.w" => "__nvvm_read_ptx_sreg_ctaid_w",
+    "llvm.nvvm.read.ptx.sreg.ctaid.x" => "__nvvm_read_ptx_sreg_ctaid_x",
+    "llvm.nvvm.read.ptx.sreg.ctaid.y" => "__nvvm_read_ptx_sreg_ctaid_y",
+    "llvm.nvvm.read.ptx.sreg.ctaid.z" => "__nvvm_read_ptx_sreg_ctaid_z",
+    "llvm.nvvm.read.ptx.sreg.envreg0" => "__nvvm_read_ptx_sreg_envreg0",
+    "llvm.nvvm.read.ptx.sreg.envreg1" => "__nvvm_read_ptx_sreg_envreg1",
+    "llvm.nvvm.read.ptx.sreg.envreg10" => "__nvvm_read_ptx_sreg_envreg10",
+    "llvm.nvvm.read.ptx.sreg.envreg11" => "__nvvm_read_ptx_sreg_envreg11",
+    "llvm.nvvm.read.ptx.sreg.envreg12" => "__nvvm_read_ptx_sreg_envreg12",
+    "llvm.nvvm.read.ptx.sreg.envreg13" => "__nvvm_read_ptx_sreg_envreg13",
+    "llvm.nvvm.read.ptx.sreg.envreg14" => "__nvvm_read_ptx_sreg_envreg14",
+    "llvm.nvvm.read.ptx.sreg.envreg15" => "__nvvm_read_ptx_sreg_envreg15",
+    "llvm.nvvm.read.ptx.sreg.envreg16" => "__nvvm_read_ptx_sreg_envreg16",
+    "llvm.nvvm.read.ptx.sreg.envreg17" => "__nvvm_read_ptx_sreg_envreg17",
+    "llvm.nvvm.read.ptx.sreg.envreg18" => "__nvvm_read_ptx_sreg_envreg18",
+    "llvm.nvvm.read.ptx.sreg.envreg19" => "__nvvm_read_ptx_sreg_envreg19",
+    "llvm.nvvm.read.ptx.sreg.envreg2" => "__nvvm_read_ptx_sreg_envreg2",
+    "llvm.nvvm.read.ptx.sreg.envreg20" => "__nvvm_read_ptx_sreg_envreg20",
+    "llvm.nvvm.read.ptx.sreg.envreg21" => "__nvvm_read_ptx_sreg_envreg21",
+    "llvm.nvvm.read.ptx.sreg.envreg22" => "__nvvm_read_ptx_sreg_envreg22",
+    "llvm.nvvm.read.ptx.sreg.envreg23" => "__nvvm_read_ptx_sreg_envreg23",
+    "llvm.nvvm.read.ptx.sreg.envreg24" => "__nvvm_read_ptx_sreg_envreg24",
+    "llvm.nvvm.read.ptx.sreg.envreg25" => "__nvvm_read_ptx_sreg_envreg25",
+    "llvm.nvvm.read.ptx.sreg.envreg26" => "__nvvm_read_ptx_sreg_envreg26",
+    "llvm.nvvm.read.ptx.sreg.envreg27" => "__nvvm_read_ptx_sreg_envreg27",
+    "llvm.nvvm.read.ptx.sreg.envreg28" => "__nvvm_read_ptx_sreg_envreg28",
+    "llvm.nvvm.read.ptx.sreg.envreg29" => "__nvvm_read_ptx_sreg_envreg29",
+    "llvm.nvvm.read.ptx.sreg.envreg3" => "__nvvm_read_ptx_sreg_envreg3",
+    "llvm.nvvm.read.ptx.sreg.envreg30" => "__nvvm_read_ptx_sreg_envreg30",
+    "llvm.nvvm.read.ptx.sreg.envreg31" => "__nvvm_read_ptx_sreg_envreg31",
+    "llvm.nvvm.read.ptx.sreg.envreg4" => "__nvvm_read_ptx_sreg_envreg4",
+    "llvm.nvvm.read.ptx.sreg.envreg5" => "__nvvm_read_ptx_sreg_envreg5",
+    "llvm.nvvm.read.ptx.sreg.envreg6" => "__nvvm_read_ptx_sreg_envreg6",
+    "llvm.nvvm.read.ptx.sreg.envreg7" => "__nvvm_read_ptx_sreg_envreg7",
+    "llvm.nvvm.read.ptx.sreg.envreg8" => "__nvvm_read_ptx_sreg_envreg8",
+    "llvm.nvvm.read.ptx.sreg.envreg9" => "__nvvm_read_ptx_sreg_envreg9",
+    "llvm.nvvm.read.ptx.sreg.globaltimer" => "__nvvm_read_ptx_sreg_globaltimer",
+    "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_gridid",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_laneid",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_lanemask_eq",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_lanemask_ge",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_lanemask_gt",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_lanemask_le",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_lanemask_lt",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.nctaid.w" => "__nvvm_read_ptx_sreg_nctaid_w",
+    "llvm.nvvm.read.ptx.sreg.nctaid.x" => "__nvvm_read_ptx_sreg_nctaid_x",
+    "llvm.nvvm.read.ptx.sreg.nctaid.y" => "__nvvm_read_ptx_sreg_nctaid_y",
+    "llvm.nvvm.read.ptx.sreg.nctaid.z" => "__nvvm_read_ptx_sreg_nctaid_z",
+    "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_nsmid",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.ntid.w" => "__nvvm_read_ptx_sreg_ntid_w",
+    "llvm.nvvm.read.ptx.sreg.ntid.x" => "__nvvm_read_ptx_sreg_ntid_x",
+    "llvm.nvvm.read.ptx.sreg.ntid.y" => "__nvvm_read_ptx_sreg_ntid_y",
+    "llvm.nvvm.read.ptx.sreg.ntid.z" => "__nvvm_read_ptx_sreg_ntid_z",
+    "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_nwarpid",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_pm0",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_pm1",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_pm2",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_pm3",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_smid",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.tid.w" => "__nvvm_read_ptx_sreg_tid_w",
+    "llvm.nvvm.read.ptx.sreg.tid.x" => "__nvvm_read_ptx_sreg_tid_x",
+    "llvm.nvvm.read.ptx.sreg.tid.y" => "__nvvm_read_ptx_sreg_tid_y",
+    "llvm.nvvm.read.ptx.sreg.tid.z" => "__nvvm_read_ptx_sreg_tid_z",
+    "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_warpid",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_warpsize",
+    // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_",
+    "llvm.nvvm.redux.sync.add" => "__nvvm_redux_sync_add",
+    "llvm.nvvm.redux.sync.and" => "__nvvm_redux_sync_and",
+    "llvm.nvvm.redux.sync.max" => "__nvvm_redux_sync_max",
+    "llvm.nvvm.redux.sync.min" => "__nvvm_redux_sync_min",
+    "llvm.nvvm.redux.sync.or" => "__nvvm_redux_sync_or",
+    "llvm.nvvm.redux.sync.umax" => "__nvvm_redux_sync_umax",
+    "llvm.nvvm.redux.sync.umin" => "__nvvm_redux_sync_umin",
+    "llvm.nvvm.redux.sync.xor" => "__nvvm_redux_sync_xor",
+    "llvm.nvvm.reflect" => "__nvvm_reflect",
+    "llvm.nvvm.rotate.b32" => "__nvvm_rotate_b32",
+    "llvm.nvvm.rotate.b64" => "__nvvm_rotate_b64",
+    "llvm.nvvm.rotate.right.b64" => "__nvvm_rotate_right_b64",
+    "llvm.nvvm.round.d" => "__nvvm_round_d",
+    "llvm.nvvm.round.f" => "__nvvm_round_f",
+    "llvm.nvvm.round.ftz.f" => "__nvvm_round_ftz_f",
+    "llvm.nvvm.rsqrt.approx.d" => "__nvvm_rsqrt_approx_d",
+    "llvm.nvvm.rsqrt.approx.f" => "__nvvm_rsqrt_approx_f",
+    "llvm.nvvm.rsqrt.approx.ftz.d" => "__nvvm_rsqrt_approx_ftz_d",
+    "llvm.nvvm.rsqrt.approx.ftz.f" => "__nvvm_rsqrt_approx_ftz_f",
+    "llvm.nvvm.sad.i" => "__nvvm_sad_i",
+    "llvm.nvvm.sad.ll" => "__nvvm_sad_ll",
+    "llvm.nvvm.sad.s" => "__nvvm_sad_s",
+    "llvm.nvvm.sad.ui" => "__nvvm_sad_ui",
+    "llvm.nvvm.sad.ull" => "__nvvm_sad_ull",
+    "llvm.nvvm.sad.us" => "__nvvm_sad_us",
+    "llvm.nvvm.saturate.d" => "__nvvm_saturate_d",
+    "llvm.nvvm.saturate.f" => "__nvvm_saturate_f",
+    "llvm.nvvm.saturate.ftz.f" => "__nvvm_saturate_ftz_f",
+    "llvm.nvvm.shfl.bfly.f32" => "__nvvm_shfl_bfly_f32",
+    "llvm.nvvm.shfl.bfly.i32" => "__nvvm_shfl_bfly_i32",
+    "llvm.nvvm.shfl.down.f32" => "__nvvm_shfl_down_f32",
+    "llvm.nvvm.shfl.down.i32" => "__nvvm_shfl_down_i32",
+    "llvm.nvvm.shfl.idx.f32" => "__nvvm_shfl_idx_f32",
+    "llvm.nvvm.shfl.idx.i32" => "__nvvm_shfl_idx_i32",
+    "llvm.nvvm.shfl.sync.bfly.f32" => "__nvvm_shfl_sync_bfly_f32",
+    "llvm.nvvm.shfl.sync.bfly.i32" => "__nvvm_shfl_sync_bfly_i32",
+    "llvm.nvvm.shfl.sync.down.f32" => "__nvvm_shfl_sync_down_f32",
+    "llvm.nvvm.shfl.sync.down.i32" => "__nvvm_shfl_sync_down_i32",
+    "llvm.nvvm.shfl.sync.idx.f32" => "__nvvm_shfl_sync_idx_f32",
+    "llvm.nvvm.shfl.sync.idx.i32" => "__nvvm_shfl_sync_idx_i32",
+    "llvm.nvvm.shfl.sync.up.f32" => "__nvvm_shfl_sync_up_f32",
+    "llvm.nvvm.shfl.sync.up.i32" => "__nvvm_shfl_sync_up_i32",
+    "llvm.nvvm.shfl.up.f32" => "__nvvm_shfl_up_f32",
+    "llvm.nvvm.shfl.up.i32" => "__nvvm_shfl_up_i32",
+    "llvm.nvvm.sin.approx.f" => "__nvvm_sin_approx_f",
+    "llvm.nvvm.sin.approx.ftz.f" => "__nvvm_sin_approx_ftz_f",
+    "llvm.nvvm.sqrt.approx.f" => "__nvvm_sqrt_approx_f",
+    "llvm.nvvm.sqrt.approx.ftz.f" => "__nvvm_sqrt_approx_ftz_f",
+    "llvm.nvvm.sqrt.f" => "__nvvm_sqrt_f",
+    "llvm.nvvm.sqrt.rm.d" => "__nvvm_sqrt_rm_d",
+    "llvm.nvvm.sqrt.rm.f" => "__nvvm_sqrt_rm_f",
+    "llvm.nvvm.sqrt.rm.ftz.f" => "__nvvm_sqrt_rm_ftz_f",
+    "llvm.nvvm.sqrt.rn.d" => "__nvvm_sqrt_rn_d",
+    "llvm.nvvm.sqrt.rn.f" => "__nvvm_sqrt_rn_f",
+    "llvm.nvvm.sqrt.rn.ftz.f" => "__nvvm_sqrt_rn_ftz_f",
+    "llvm.nvvm.sqrt.rp.d" => "__nvvm_sqrt_rp_d",
+    "llvm.nvvm.sqrt.rp.f" => "__nvvm_sqrt_rp_f",
+    "llvm.nvvm.sqrt.rp.ftz.f" => "__nvvm_sqrt_rp_ftz_f",
+    "llvm.nvvm.sqrt.rz.d" => "__nvvm_sqrt_rz_d",
+    "llvm.nvvm.sqrt.rz.f" => "__nvvm_sqrt_rz_f",
+    "llvm.nvvm.sqrt.rz.ftz.f" => "__nvvm_sqrt_rz_ftz_f",
+    "llvm.nvvm.suq.array.size" => "__nvvm_suq_array_size",
+    "llvm.nvvm.suq.channel.data.type" => "__nvvm_suq_channel_data_type",
+    "llvm.nvvm.suq.channel.order" => "__nvvm_suq_channel_order",
+    "llvm.nvvm.suq.depth" => "__nvvm_suq_depth",
+    "llvm.nvvm.suq.height" => "__nvvm_suq_height",
+    "llvm.nvvm.suq.width" => "__nvvm_suq_width",
+    "llvm.nvvm.sust.b.1d.array.i16.clamp" => "__nvvm_sust_b_1d_array_i16_clamp",
+    "llvm.nvvm.sust.b.1d.array.i16.trap" => "__nvvm_sust_b_1d_array_i16_trap",
+    "llvm.nvvm.sust.b.1d.array.i16.zero" => "__nvvm_sust_b_1d_array_i16_zero",
+    "llvm.nvvm.sust.b.1d.array.i32.clamp" => "__nvvm_sust_b_1d_array_i32_clamp",
+    "llvm.nvvm.sust.b.1d.array.i32.trap" => "__nvvm_sust_b_1d_array_i32_trap",
+    "llvm.nvvm.sust.b.1d.array.i32.zero" => "__nvvm_sust_b_1d_array_i32_zero",
+    "llvm.nvvm.sust.b.1d.array.i64.clamp" => "__nvvm_sust_b_1d_array_i64_clamp",
+    "llvm.nvvm.sust.b.1d.array.i64.trap" => "__nvvm_sust_b_1d_array_i64_trap",
+    "llvm.nvvm.sust.b.1d.array.i64.zero" => "__nvvm_sust_b_1d_array_i64_zero",
+    "llvm.nvvm.sust.b.1d.array.i8.clamp" => "__nvvm_sust_b_1d_array_i8_clamp",
+    "llvm.nvvm.sust.b.1d.array.i8.trap" => "__nvvm_sust_b_1d_array_i8_trap",
+    "llvm.nvvm.sust.b.1d.array.i8.zero" => "__nvvm_sust_b_1d_array_i8_zero",
+    "llvm.nvvm.sust.b.1d.array.v2i16.clamp" => "__nvvm_sust_b_1d_array_v2i16_clamp",
+    "llvm.nvvm.sust.b.1d.array.v2i16.trap" => "__nvvm_sust_b_1d_array_v2i16_trap",
+    "llvm.nvvm.sust.b.1d.array.v2i16.zero" => "__nvvm_sust_b_1d_array_v2i16_zero",
+    "llvm.nvvm.sust.b.1d.array.v2i32.clamp" => "__nvvm_sust_b_1d_array_v2i32_clamp",
+    "llvm.nvvm.sust.b.1d.array.v2i32.trap" => "__nvvm_sust_b_1d_array_v2i32_trap",
+    "llvm.nvvm.sust.b.1d.array.v2i32.zero" => "__nvvm_sust_b_1d_array_v2i32_zero",
+    "llvm.nvvm.sust.b.1d.array.v2i64.clamp" => "__nvvm_sust_b_1d_array_v2i64_clamp",
+    "llvm.nvvm.sust.b.1d.array.v2i64.trap" => "__nvvm_sust_b_1d_array_v2i64_trap",
+    "llvm.nvvm.sust.b.1d.array.v2i64.zero" => "__nvvm_sust_b_1d_array_v2i64_zero",
+    "llvm.nvvm.sust.b.1d.array.v2i8.clamp" => "__nvvm_sust_b_1d_array_v2i8_clamp",
+    "llvm.nvvm.sust.b.1d.array.v2i8.trap" => "__nvvm_sust_b_1d_array_v2i8_trap",
+    "llvm.nvvm.sust.b.1d.array.v2i8.zero" => "__nvvm_sust_b_1d_array_v2i8_zero",
+    "llvm.nvvm.sust.b.1d.array.v4i16.clamp" => "__nvvm_sust_b_1d_array_v4i16_clamp",
+    "llvm.nvvm.sust.b.1d.array.v4i16.trap" => "__nvvm_sust_b_1d_array_v4i16_trap",
+    "llvm.nvvm.sust.b.1d.array.v4i16.zero" => "__nvvm_sust_b_1d_array_v4i16_zero",
+    "llvm.nvvm.sust.b.1d.array.v4i32.clamp" => "__nvvm_sust_b_1d_array_v4i32_clamp",
+    "llvm.nvvm.sust.b.1d.array.v4i32.trap" => "__nvvm_sust_b_1d_array_v4i32_trap",
+    "llvm.nvvm.sust.b.1d.array.v4i32.zero" => "__nvvm_sust_b_1d_array_v4i32_zero",
+    "llvm.nvvm.sust.b.1d.array.v4i8.clamp" => "__nvvm_sust_b_1d_array_v4i8_clamp",
+    "llvm.nvvm.sust.b.1d.array.v4i8.trap" => "__nvvm_sust_b_1d_array_v4i8_trap",
+    "llvm.nvvm.sust.b.1d.array.v4i8.zero" => "__nvvm_sust_b_1d_array_v4i8_zero",
+    "llvm.nvvm.sust.b.1d.i16.clamp" => "__nvvm_sust_b_1d_i16_clamp",
+    "llvm.nvvm.sust.b.1d.i16.trap" => "__nvvm_sust_b_1d_i16_trap",
+    "llvm.nvvm.sust.b.1d.i16.zero" => "__nvvm_sust_b_1d_i16_zero",
+    "llvm.nvvm.sust.b.1d.i32.clamp" => "__nvvm_sust_b_1d_i32_clamp",
+    "llvm.nvvm.sust.b.1d.i32.trap" => "__nvvm_sust_b_1d_i32_trap",
+    "llvm.nvvm.sust.b.1d.i32.zero" => "__nvvm_sust_b_1d_i32_zero",
+    "llvm.nvvm.sust.b.1d.i64.clamp" => "__nvvm_sust_b_1d_i64_clamp",
+    "llvm.nvvm.sust.b.1d.i64.trap" => "__nvvm_sust_b_1d_i64_trap",
+    "llvm.nvvm.sust.b.1d.i64.zero" => "__nvvm_sust_b_1d_i64_zero",
+    "llvm.nvvm.sust.b.1d.i8.clamp" => "__nvvm_sust_b_1d_i8_clamp",
+    "llvm.nvvm.sust.b.1d.i8.trap" => "__nvvm_sust_b_1d_i8_trap",
+    "llvm.nvvm.sust.b.1d.i8.zero" => "__nvvm_sust_b_1d_i8_zero",
+    "llvm.nvvm.sust.b.1d.v2i16.clamp" => "__nvvm_sust_b_1d_v2i16_clamp",
+    "llvm.nvvm.sust.b.1d.v2i16.trap" => "__nvvm_sust_b_1d_v2i16_trap",
+    "llvm.nvvm.sust.b.1d.v2i16.zero" => "__nvvm_sust_b_1d_v2i16_zero",
+    "llvm.nvvm.sust.b.1d.v2i32.clamp" => "__nvvm_sust_b_1d_v2i32_clamp",
+    "llvm.nvvm.sust.b.1d.v2i32.trap" => "__nvvm_sust_b_1d_v2i32_trap",
+    "llvm.nvvm.sust.b.1d.v2i32.zero" => "__nvvm_sust_b_1d_v2i32_zero",
+    "llvm.nvvm.sust.b.1d.v2i64.clamp" => "__nvvm_sust_b_1d_v2i64_clamp",
+    "llvm.nvvm.sust.b.1d.v2i64.trap" => "__nvvm_sust_b_1d_v2i64_trap",
+    "llvm.nvvm.sust.b.1d.v2i64.zero" => "__nvvm_sust_b_1d_v2i64_zero",
+    "llvm.nvvm.sust.b.1d.v2i8.clamp" => "__nvvm_sust_b_1d_v2i8_clamp",
+    "llvm.nvvm.sust.b.1d.v2i8.trap" => "__nvvm_sust_b_1d_v2i8_trap",
+    "llvm.nvvm.sust.b.1d.v2i8.zero" => "__nvvm_sust_b_1d_v2i8_zero",
+    "llvm.nvvm.sust.b.1d.v4i16.clamp" => "__nvvm_sust_b_1d_v4i16_clamp",
+    "llvm.nvvm.sust.b.1d.v4i16.trap" => "__nvvm_sust_b_1d_v4i16_trap",
+    "llvm.nvvm.sust.b.1d.v4i16.zero" => "__nvvm_sust_b_1d_v4i16_zero",
+    "llvm.nvvm.sust.b.1d.v4i32.clamp" => "__nvvm_sust_b_1d_v4i32_clamp",
+    "llvm.nvvm.sust.b.1d.v4i32.trap" => "__nvvm_sust_b_1d_v4i32_trap",
+    "llvm.nvvm.sust.b.1d.v4i32.zero" => "__nvvm_sust_b_1d_v4i32_zero",
+    "llvm.nvvm.sust.b.1d.v4i8.clamp" => "__nvvm_sust_b_1d_v4i8_clamp",
+    "llvm.nvvm.sust.b.1d.v4i8.trap" => "__nvvm_sust_b_1d_v4i8_trap",
+    "llvm.nvvm.sust.b.1d.v4i8.zero" => "__nvvm_sust_b_1d_v4i8_zero",
+    "llvm.nvvm.sust.b.2d.array.i16.clamp" => "__nvvm_sust_b_2d_array_i16_clamp",
+    "llvm.nvvm.sust.b.2d.array.i16.trap" => "__nvvm_sust_b_2d_array_i16_trap",
+    "llvm.nvvm.sust.b.2d.array.i16.zero" => "__nvvm_sust_b_2d_array_i16_zero",
+    "llvm.nvvm.sust.b.2d.array.i32.clamp" => "__nvvm_sust_b_2d_array_i32_clamp",
+    "llvm.nvvm.sust.b.2d.array.i32.trap" => "__nvvm_sust_b_2d_array_i32_trap",
+    "llvm.nvvm.sust.b.2d.array.i32.zero" => "__nvvm_sust_b_2d_array_i32_zero",
+    "llvm.nvvm.sust.b.2d.array.i64.clamp" => "__nvvm_sust_b_2d_array_i64_clamp",
+    "llvm.nvvm.sust.b.2d.array.i64.trap" => "__nvvm_sust_b_2d_array_i64_trap",
+    "llvm.nvvm.sust.b.2d.array.i64.zero" => "__nvvm_sust_b_2d_array_i64_zero",
+    "llvm.nvvm.sust.b.2d.array.i8.clamp" => "__nvvm_sust_b_2d_array_i8_clamp",
+    "llvm.nvvm.sust.b.2d.array.i8.trap" => "__nvvm_sust_b_2d_array_i8_trap",
+    "llvm.nvvm.sust.b.2d.array.i8.zero" => "__nvvm_sust_b_2d_array_i8_zero",
+    "llvm.nvvm.sust.b.2d.array.v2i16.clamp" => "__nvvm_sust_b_2d_array_v2i16_clamp",
+    "llvm.nvvm.sust.b.2d.array.v2i16.trap" => "__nvvm_sust_b_2d_array_v2i16_trap",
+    "llvm.nvvm.sust.b.2d.array.v2i16.zero" => "__nvvm_sust_b_2d_array_v2i16_zero",
+    "llvm.nvvm.sust.b.2d.array.v2i32.clamp" => "__nvvm_sust_b_2d_array_v2i32_clamp",
+    "llvm.nvvm.sust.b.2d.array.v2i32.trap" => "__nvvm_sust_b_2d_array_v2i32_trap",
+    "llvm.nvvm.sust.b.2d.array.v2i32.zero" => "__nvvm_sust_b_2d_array_v2i32_zero",
+    "llvm.nvvm.sust.b.2d.array.v2i64.clamp" => "__nvvm_sust_b_2d_array_v2i64_clamp",
+    "llvm.nvvm.sust.b.2d.array.v2i64.trap" => "__nvvm_sust_b_2d_array_v2i64_trap",
+    "llvm.nvvm.sust.b.2d.array.v2i64.zero" => "__nvvm_sust_b_2d_array_v2i64_zero",
+    "llvm.nvvm.sust.b.2d.array.v2i8.clamp" => "__nvvm_sust_b_2d_array_v2i8_clamp",
+    "llvm.nvvm.sust.b.2d.array.v2i8.trap" => "__nvvm_sust_b_2d_array_v2i8_trap",
+    "llvm.nvvm.sust.b.2d.array.v2i8.zero" => "__nvvm_sust_b_2d_array_v2i8_zero",
+    "llvm.nvvm.sust.b.2d.array.v4i16.clamp" => "__nvvm_sust_b_2d_array_v4i16_clamp",
+    "llvm.nvvm.sust.b.2d.array.v4i16.trap" => "__nvvm_sust_b_2d_array_v4i16_trap",
+    "llvm.nvvm.sust.b.2d.array.v4i16.zero" => "__nvvm_sust_b_2d_array_v4i16_zero",
+    "llvm.nvvm.sust.b.2d.array.v4i32.clamp" => "__nvvm_sust_b_2d_array_v4i32_clamp",
+    "llvm.nvvm.sust.b.2d.array.v4i32.trap" => "__nvvm_sust_b_2d_array_v4i32_trap",
+    "llvm.nvvm.sust.b.2d.array.v4i32.zero" => "__nvvm_sust_b_2d_array_v4i32_zero",
+    "llvm.nvvm.sust.b.2d.array.v4i8.clamp" => "__nvvm_sust_b_2d_array_v4i8_clamp",
+    "llvm.nvvm.sust.b.2d.array.v4i8.trap" => "__nvvm_sust_b_2d_array_v4i8_trap",
+    "llvm.nvvm.sust.b.2d.array.v4i8.zero" => "__nvvm_sust_b_2d_array_v4i8_zero",
+    "llvm.nvvm.sust.b.2d.i16.clamp" => "__nvvm_sust_b_2d_i16_clamp",
+    "llvm.nvvm.sust.b.2d.i16.trap" => "__nvvm_sust_b_2d_i16_trap",
+    "llvm.nvvm.sust.b.2d.i16.zero" => "__nvvm_sust_b_2d_i16_zero",
+    "llvm.nvvm.sust.b.2d.i32.clamp" => "__nvvm_sust_b_2d_i32_clamp",
+    "llvm.nvvm.sust.b.2d.i32.trap" => "__nvvm_sust_b_2d_i32_trap",
+    "llvm.nvvm.sust.b.2d.i32.zero" => "__nvvm_sust_b_2d_i32_zero",
+    "llvm.nvvm.sust.b.2d.i64.clamp" => "__nvvm_sust_b_2d_i64_clamp",
+    "llvm.nvvm.sust.b.2d.i64.trap" => "__nvvm_sust_b_2d_i64_trap",
+    "llvm.nvvm.sust.b.2d.i64.zero" => "__nvvm_sust_b_2d_i64_zero",
+    "llvm.nvvm.sust.b.2d.i8.clamp" => "__nvvm_sust_b_2d_i8_clamp",
+    "llvm.nvvm.sust.b.2d.i8.trap" => "__nvvm_sust_b_2d_i8_trap",
+    "llvm.nvvm.sust.b.2d.i8.zero" => "__nvvm_sust_b_2d_i8_zero",
+    "llvm.nvvm.sust.b.2d.v2i16.clamp" => "__nvvm_sust_b_2d_v2i16_clamp",
+    "llvm.nvvm.sust.b.2d.v2i16.trap" => "__nvvm_sust_b_2d_v2i16_trap",
+    "llvm.nvvm.sust.b.2d.v2i16.zero" => "__nvvm_sust_b_2d_v2i16_zero",
+    "llvm.nvvm.sust.b.2d.v2i32.clamp" => "__nvvm_sust_b_2d_v2i32_clamp",
+    "llvm.nvvm.sust.b.2d.v2i32.trap" => "__nvvm_sust_b_2d_v2i32_trap",
+    "llvm.nvvm.sust.b.2d.v2i32.zero" => "__nvvm_sust_b_2d_v2i32_zero",
+    "llvm.nvvm.sust.b.2d.v2i64.clamp" => "__nvvm_sust_b_2d_v2i64_clamp",
+    "llvm.nvvm.sust.b.2d.v2i64.trap" => "__nvvm_sust_b_2d_v2i64_trap",
+    "llvm.nvvm.sust.b.2d.v2i64.zero" => "__nvvm_sust_b_2d_v2i64_zero",
+    "llvm.nvvm.sust.b.2d.v2i8.clamp" => "__nvvm_sust_b_2d_v2i8_clamp",
+    "llvm.nvvm.sust.b.2d.v2i8.trap" => "__nvvm_sust_b_2d_v2i8_trap",
+    "llvm.nvvm.sust.b.2d.v2i8.zero" => "__nvvm_sust_b_2d_v2i8_zero",
+    "llvm.nvvm.sust.b.2d.v4i16.clamp" => "__nvvm_sust_b_2d_v4i16_clamp",
+    "llvm.nvvm.sust.b.2d.v4i16.trap" => "__nvvm_sust_b_2d_v4i16_trap",
+    "llvm.nvvm.sust.b.2d.v4i16.zero" => "__nvvm_sust_b_2d_v4i16_zero",
+    "llvm.nvvm.sust.b.2d.v4i32.clamp" => "__nvvm_sust_b_2d_v4i32_clamp",
+    "llvm.nvvm.sust.b.2d.v4i32.trap" => "__nvvm_sust_b_2d_v4i32_trap",
+    "llvm.nvvm.sust.b.2d.v4i32.zero" => "__nvvm_sust_b_2d_v4i32_zero",
+    "llvm.nvvm.sust.b.2d.v4i8.clamp" => "__nvvm_sust_b_2d_v4i8_clamp",
+    "llvm.nvvm.sust.b.2d.v4i8.trap" => "__nvvm_sust_b_2d_v4i8_trap",
+    "llvm.nvvm.sust.b.2d.v4i8.zero" => "__nvvm_sust_b_2d_v4i8_zero",
+    "llvm.nvvm.sust.b.3d.i16.clamp" => "__nvvm_sust_b_3d_i16_clamp",
+    "llvm.nvvm.sust.b.3d.i16.trap" => "__nvvm_sust_b_3d_i16_trap",
+    "llvm.nvvm.sust.b.3d.i16.zero" => "__nvvm_sust_b_3d_i16_zero",
+    "llvm.nvvm.sust.b.3d.i32.clamp" => "__nvvm_sust_b_3d_i32_clamp",
+    "llvm.nvvm.sust.b.3d.i32.trap" => "__nvvm_sust_b_3d_i32_trap",
+    "llvm.nvvm.sust.b.3d.i32.zero" => "__nvvm_sust_b_3d_i32_zero",
+    "llvm.nvvm.sust.b.3d.i64.clamp" => "__nvvm_sust_b_3d_i64_clamp",
+    "llvm.nvvm.sust.b.3d.i64.trap" => "__nvvm_sust_b_3d_i64_trap",
+    "llvm.nvvm.sust.b.3d.i64.zero" => "__nvvm_sust_b_3d_i64_zero",
+    "llvm.nvvm.sust.b.3d.i8.clamp" => "__nvvm_sust_b_3d_i8_clamp",
+    "llvm.nvvm.sust.b.3d.i8.trap" => "__nvvm_sust_b_3d_i8_trap",
+    "llvm.nvvm.sust.b.3d.i8.zero" => "__nvvm_sust_b_3d_i8_zero",
+    "llvm.nvvm.sust.b.3d.v2i16.clamp" => "__nvvm_sust_b_3d_v2i16_clamp",
+    "llvm.nvvm.sust.b.3d.v2i16.trap" => "__nvvm_sust_b_3d_v2i16_trap",
+    "llvm.nvvm.sust.b.3d.v2i16.zero" => "__nvvm_sust_b_3d_v2i16_zero",
+    "llvm.nvvm.sust.b.3d.v2i32.clamp" => "__nvvm_sust_b_3d_v2i32_clamp",
+    "llvm.nvvm.sust.b.3d.v2i32.trap" => "__nvvm_sust_b_3d_v2i32_trap",
+    "llvm.nvvm.sust.b.3d.v2i32.zero" => "__nvvm_sust_b_3d_v2i32_zero",
+    "llvm.nvvm.sust.b.3d.v2i64.clamp" => "__nvvm_sust_b_3d_v2i64_clamp",
+    "llvm.nvvm.sust.b.3d.v2i64.trap" => "__nvvm_sust_b_3d_v2i64_trap",
+    "llvm.nvvm.sust.b.3d.v2i64.zero" => "__nvvm_sust_b_3d_v2i64_zero",
+    "llvm.nvvm.sust.b.3d.v2i8.clamp" => "__nvvm_sust_b_3d_v2i8_clamp",
+    "llvm.nvvm.sust.b.3d.v2i8.trap" => "__nvvm_sust_b_3d_v2i8_trap",
+    "llvm.nvvm.sust.b.3d.v2i8.zero" => "__nvvm_sust_b_3d_v2i8_zero",
+    "llvm.nvvm.sust.b.3d.v4i16.clamp" => "__nvvm_sust_b_3d_v4i16_clamp",
+    "llvm.nvvm.sust.b.3d.v4i16.trap" => "__nvvm_sust_b_3d_v4i16_trap",
+    "llvm.nvvm.sust.b.3d.v4i16.zero" => "__nvvm_sust_b_3d_v4i16_zero",
+    "llvm.nvvm.sust.b.3d.v4i32.clamp" => "__nvvm_sust_b_3d_v4i32_clamp",
+    "llvm.nvvm.sust.b.3d.v4i32.trap" => "__nvvm_sust_b_3d_v4i32_trap",
+    "llvm.nvvm.sust.b.3d.v4i32.zero" => "__nvvm_sust_b_3d_v4i32_zero",
+    "llvm.nvvm.sust.b.3d.v4i8.clamp" => "__nvvm_sust_b_3d_v4i8_clamp",
+    "llvm.nvvm.sust.b.3d.v4i8.trap" => "__nvvm_sust_b_3d_v4i8_trap",
+    "llvm.nvvm.sust.b.3d.v4i8.zero" => "__nvvm_sust_b_3d_v4i8_zero",
+    "llvm.nvvm.sust.p.1d.array.i16.trap" => "__nvvm_sust_p_1d_array_i16_trap",
+    "llvm.nvvm.sust.p.1d.array.i32.trap" => "__nvvm_sust_p_1d_array_i32_trap",
+    "llvm.nvvm.sust.p.1d.array.i8.trap" => "__nvvm_sust_p_1d_array_i8_trap",
+    "llvm.nvvm.sust.p.1d.array.v2i16.trap" => "__nvvm_sust_p_1d_array_v2i16_trap",
+    "llvm.nvvm.sust.p.1d.array.v2i32.trap" => "__nvvm_sust_p_1d_array_v2i32_trap",
+    "llvm.nvvm.sust.p.1d.array.v2i8.trap" => "__nvvm_sust_p_1d_array_v2i8_trap",
+    "llvm.nvvm.sust.p.1d.array.v4i16.trap" => "__nvvm_sust_p_1d_array_v4i16_trap",
+    "llvm.nvvm.sust.p.1d.array.v4i32.trap" => "__nvvm_sust_p_1d_array_v4i32_trap",
+    "llvm.nvvm.sust.p.1d.array.v4i8.trap" => "__nvvm_sust_p_1d_array_v4i8_trap",
+    "llvm.nvvm.sust.p.1d.i16.trap" => "__nvvm_sust_p_1d_i16_trap",
+    "llvm.nvvm.sust.p.1d.i32.trap" => "__nvvm_sust_p_1d_i32_trap",
+    "llvm.nvvm.sust.p.1d.i8.trap" => "__nvvm_sust_p_1d_i8_trap",
+    "llvm.nvvm.sust.p.1d.v2i16.trap" => "__nvvm_sust_p_1d_v2i16_trap",
+    "llvm.nvvm.sust.p.1d.v2i32.trap" => "__nvvm_sust_p_1d_v2i32_trap",
+    "llvm.nvvm.sust.p.1d.v2i8.trap" => "__nvvm_sust_p_1d_v2i8_trap",
+    "llvm.nvvm.sust.p.1d.v4i16.trap" => "__nvvm_sust_p_1d_v4i16_trap",
+    "llvm.nvvm.sust.p.1d.v4i32.trap" => "__nvvm_sust_p_1d_v4i32_trap",
+    "llvm.nvvm.sust.p.1d.v4i8.trap" => "__nvvm_sust_p_1d_v4i8_trap",
+    "llvm.nvvm.sust.p.2d.array.i16.trap" => "__nvvm_sust_p_2d_array_i16_trap",
+    "llvm.nvvm.sust.p.2d.array.i32.trap" => "__nvvm_sust_p_2d_array_i32_trap",
+    "llvm.nvvm.sust.p.2d.array.i8.trap" => "__nvvm_sust_p_2d_array_i8_trap",
+    "llvm.nvvm.sust.p.2d.array.v2i16.trap" => "__nvvm_sust_p_2d_array_v2i16_trap",
+    "llvm.nvvm.sust.p.2d.array.v2i32.trap" => "__nvvm_sust_p_2d_array_v2i32_trap",
+    "llvm.nvvm.sust.p.2d.array.v2i8.trap" => "__nvvm_sust_p_2d_array_v2i8_trap",
+    "llvm.nvvm.sust.p.2d.array.v4i16.trap" => "__nvvm_sust_p_2d_array_v4i16_trap",
+    "llvm.nvvm.sust.p.2d.array.v4i32.trap" => "__nvvm_sust_p_2d_array_v4i32_trap",
+    "llvm.nvvm.sust.p.2d.array.v4i8.trap" => "__nvvm_sust_p_2d_array_v4i8_trap",
+    "llvm.nvvm.sust.p.2d.i16.trap" => "__nvvm_sust_p_2d_i16_trap",
+    "llvm.nvvm.sust.p.2d.i32.trap" => "__nvvm_sust_p_2d_i32_trap",
+    "llvm.nvvm.sust.p.2d.i8.trap" => "__nvvm_sust_p_2d_i8_trap",
+    "llvm.nvvm.sust.p.2d.v2i16.trap" => "__nvvm_sust_p_2d_v2i16_trap",
+    "llvm.nvvm.sust.p.2d.v2i32.trap" => "__nvvm_sust_p_2d_v2i32_trap",
+    "llvm.nvvm.sust.p.2d.v2i8.trap" => "__nvvm_sust_p_2d_v2i8_trap",
+    "llvm.nvvm.sust.p.2d.v4i16.trap" => "__nvvm_sust_p_2d_v4i16_trap",
+    "llvm.nvvm.sust.p.2d.v4i32.trap" => "__nvvm_sust_p_2d_v4i32_trap",
+    "llvm.nvvm.sust.p.2d.v4i8.trap" => "__nvvm_sust_p_2d_v4i8_trap",
+    "llvm.nvvm.sust.p.3d.i16.trap" => "__nvvm_sust_p_3d_i16_trap",
+    "llvm.nvvm.sust.p.3d.i32.trap" => "__nvvm_sust_p_3d_i32_trap",
+    "llvm.nvvm.sust.p.3d.i8.trap" => "__nvvm_sust_p_3d_i8_trap",
+    "llvm.nvvm.sust.p.3d.v2i16.trap" => "__nvvm_sust_p_3d_v2i16_trap",
+    "llvm.nvvm.sust.p.3d.v2i32.trap" => "__nvvm_sust_p_3d_v2i32_trap",
+    "llvm.nvvm.sust.p.3d.v2i8.trap" => "__nvvm_sust_p_3d_v2i8_trap",
+    "llvm.nvvm.sust.p.3d.v4i16.trap" => "__nvvm_sust_p_3d_v4i16_trap",
+    "llvm.nvvm.sust.p.3d.v4i32.trap" => "__nvvm_sust_p_3d_v4i32_trap",
+    "llvm.nvvm.sust.p.3d.v4i8.trap" => "__nvvm_sust_p_3d_v4i8_trap",
+    "llvm.nvvm.swap.lo.hi.b64" => "__nvvm_swap_lo_hi_b64",
+    "llvm.nvvm.trunc.d" => "__nvvm_trunc_d",
+    "llvm.nvvm.trunc.f" => "__nvvm_trunc_f",
+    "llvm.nvvm.trunc.ftz.f" => "__nvvm_trunc_ftz_f",
+    "llvm.nvvm.txq.array.size" => "__nvvm_txq_array_size",
+    "llvm.nvvm.txq.channel.data.type" => "__nvvm_txq_channel_data_type",
+    "llvm.nvvm.txq.channel.order" => "__nvvm_txq_channel_order",
+    "llvm.nvvm.txq.depth" => "__nvvm_txq_depth",
+    "llvm.nvvm.txq.height" => "__nvvm_txq_height",
+    "llvm.nvvm.txq.num.mipmap.levels" => "__nvvm_txq_num_mipmap_levels",
+    "llvm.nvvm.txq.num.samples" => "__nvvm_txq_num_samples",
+    "llvm.nvvm.txq.width" => "__nvvm_txq_width",
+    "llvm.nvvm.ui2d.rm" => "__nvvm_ui2d_rm",
+    "llvm.nvvm.ui2d.rn" => "__nvvm_ui2d_rn",
+    "llvm.nvvm.ui2d.rp" => "__nvvm_ui2d_rp",
+    "llvm.nvvm.ui2d.rz" => "__nvvm_ui2d_rz",
+    "llvm.nvvm.ui2f.rm" => "__nvvm_ui2f_rm",
+    "llvm.nvvm.ui2f.rn" => "__nvvm_ui2f_rn",
+    "llvm.nvvm.ui2f.rp" => "__nvvm_ui2f_rp",
+    "llvm.nvvm.ui2f.rz" => "__nvvm_ui2f_rz",
+    "llvm.nvvm.ull2d.rm" => "__nvvm_ull2d_rm",
+    "llvm.nvvm.ull2d.rn" => "__nvvm_ull2d_rn",
+    "llvm.nvvm.ull2d.rp" => "__nvvm_ull2d_rp",
+    "llvm.nvvm.ull2d.rz" => "__nvvm_ull2d_rz",
+    "llvm.nvvm.ull2f.rm" => "__nvvm_ull2f_rm",
+    "llvm.nvvm.ull2f.rn" => "__nvvm_ull2f_rn",
+    "llvm.nvvm.ull2f.rp" => "__nvvm_ull2f_rp",
+    "llvm.nvvm.ull2f.rz" => "__nvvm_ull2f_rz",
+    "llvm.nvvm.vote.all" => "__nvvm_vote_all",
+    "llvm.nvvm.vote.all.sync" => "__nvvm_vote_all_sync",
+    "llvm.nvvm.vote.any" => "__nvvm_vote_any",
+    "llvm.nvvm.vote.any.sync" => "__nvvm_vote_any_sync",
+    "llvm.nvvm.vote.ballot" => "__nvvm_vote_ballot",
+    "llvm.nvvm.vote.ballot.sync" => "__nvvm_vote_ballot_sync",
+    "llvm.nvvm.vote.uni" => "__nvvm_vote_uni",
+    "llvm.nvvm.vote.uni.sync" => "__nvvm_vote_uni_sync",
+    // ppc
+    "llvm.ppc.addex" => "__builtin_ppc_addex",
+    "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd",
+    "llvm.ppc.addg6s" => "__builtin_addg6s",
+    "llvm.ppc.addg6sd" => "__builtin_ppc_addg6s",
+    "llvm.ppc.altivec.crypto.vcipher" => "__builtin_altivec_crypto_vcipher",
+    "llvm.ppc.altivec.crypto.vcipherlast" => "__builtin_altivec_crypto_vcipherlast",
+    "llvm.ppc.altivec.crypto.vncipher" => "__builtin_altivec_crypto_vncipher",
+    "llvm.ppc.altivec.crypto.vncipherlast" => "__builtin_altivec_crypto_vncipherlast",
+    "llvm.ppc.altivec.crypto.vpermxor" => "__builtin_altivec_crypto_vpermxor",
+    "llvm.ppc.altivec.crypto.vpermxor.be" => "__builtin_altivec_crypto_vpermxor_be",
+    "llvm.ppc.altivec.crypto.vpmsumb" => "__builtin_altivec_crypto_vpmsumb",
+    "llvm.ppc.altivec.crypto.vpmsumd" => "__builtin_altivec_crypto_vpmsumd",
+    "llvm.ppc.altivec.crypto.vpmsumh" => "__builtin_altivec_crypto_vpmsumh",
+    "llvm.ppc.altivec.crypto.vpmsumw" => "__builtin_altivec_crypto_vpmsumw",
+    "llvm.ppc.altivec.crypto.vsbox" => "__builtin_altivec_crypto_vsbox",
+    "llvm.ppc.altivec.crypto.vshasigmad" => "__builtin_altivec_crypto_vshasigmad",
+    "llvm.ppc.altivec.crypto.vshasigmaw" => "__builtin_altivec_crypto_vshasigmaw",
+    "llvm.ppc.altivec.dss" => "__builtin_altivec_dss",
+    "llvm.ppc.altivec.dssall" => "__builtin_altivec_dssall",
+    "llvm.ppc.altivec.dst" => "__builtin_altivec_dst",
+    "llvm.ppc.altivec.dstst" => "__builtin_altivec_dstst",
+    "llvm.ppc.altivec.dststt" => "__builtin_altivec_dststt",
+    "llvm.ppc.altivec.dstt" => "__builtin_altivec_dstt",
+    "llvm.ppc.altivec.mfvscr" => "__builtin_altivec_mfvscr",
+    "llvm.ppc.altivec.mtvscr" => "__builtin_altivec_mtvscr",
+    "llvm.ppc.altivec.mtvsrbm" => "__builtin_altivec_mtvsrbm",
+    "llvm.ppc.altivec.mtvsrdm" => "__builtin_altivec_mtvsrdm",
+    "llvm.ppc.altivec.mtvsrhm" => "__builtin_altivec_mtvsrhm",
+    "llvm.ppc.altivec.mtvsrqm" => "__builtin_altivec_mtvsrqm",
+    "llvm.ppc.altivec.mtvsrwm" => "__builtin_altivec_mtvsrwm",
+    "llvm.ppc.altivec.vabsdub" => "__builtin_altivec_vabsdub",
+    "llvm.ppc.altivec.vabsduh" => "__builtin_altivec_vabsduh",
+    "llvm.ppc.altivec.vabsduw" => "__builtin_altivec_vabsduw",
+    "llvm.ppc.altivec.vaddcuq" => "__builtin_altivec_vaddcuq",
+    "llvm.ppc.altivec.vaddcuw" => "__builtin_altivec_vaddcuw",
+    "llvm.ppc.altivec.vaddecuq" => "__builtin_altivec_vaddecuq",
+    "llvm.ppc.altivec.vaddeuqm" => "__builtin_altivec_vaddeuqm",
+    "llvm.ppc.altivec.vaddsbs" => "__builtin_altivec_vaddsbs",
+    "llvm.ppc.altivec.vaddshs" => "__builtin_altivec_vaddshs",
+    "llvm.ppc.altivec.vaddsws" => "__builtin_altivec_vaddsws",
+    "llvm.ppc.altivec.vaddubs" => "__builtin_altivec_vaddubs",
+    "llvm.ppc.altivec.vadduhs" => "__builtin_altivec_vadduhs",
+    "llvm.ppc.altivec.vadduws" => "__builtin_altivec_vadduws",
+    "llvm.ppc.altivec.vavgsb" => "__builtin_altivec_vavgsb",
+    "llvm.ppc.altivec.vavgsh" => "__builtin_altivec_vavgsh",
+    "llvm.ppc.altivec.vavgsw" => "__builtin_altivec_vavgsw",
+    "llvm.ppc.altivec.vavgub" => "__builtin_altivec_vavgub",
+    "llvm.ppc.altivec.vavguh" => "__builtin_altivec_vavguh",
+    "llvm.ppc.altivec.vavguw" => "__builtin_altivec_vavguw",
+    "llvm.ppc.altivec.vbpermd" => "__builtin_altivec_vbpermd",
+    "llvm.ppc.altivec.vbpermq" => "__builtin_altivec_vbpermq",
+    "llvm.ppc.altivec.vcfsx" => "__builtin_altivec_vcfsx",
+    "llvm.ppc.altivec.vcfuged" => "__builtin_altivec_vcfuged",
+    "llvm.ppc.altivec.vcfux" => "__builtin_altivec_vcfux",
+    "llvm.ppc.altivec.vclrlb" => "__builtin_altivec_vclrlb",
+    "llvm.ppc.altivec.vclrrb" => "__builtin_altivec_vclrrb",
+    "llvm.ppc.altivec.vclzdm" => "__builtin_altivec_vclzdm",
+    "llvm.ppc.altivec.vclzlsbb" => "__builtin_altivec_vclzlsbb",
+    "llvm.ppc.altivec.vcmpbfp" => "__builtin_altivec_vcmpbfp",
+    "llvm.ppc.altivec.vcmpbfp.p" => "__builtin_altivec_vcmpbfp_p",
+    "llvm.ppc.altivec.vcmpeqfp" => "__builtin_altivec_vcmpeqfp",
+    "llvm.ppc.altivec.vcmpeqfp.p" => "__builtin_altivec_vcmpeqfp_p",
+    "llvm.ppc.altivec.vcmpequb" => "__builtin_altivec_vcmpequb",
+    "llvm.ppc.altivec.vcmpequb.p" => "__builtin_altivec_vcmpequb_p",
+    "llvm.ppc.altivec.vcmpequd" => "__builtin_altivec_vcmpequd",
+    "llvm.ppc.altivec.vcmpequd.p" => "__builtin_altivec_vcmpequd_p",
+    "llvm.ppc.altivec.vcmpequh" => "__builtin_altivec_vcmpequh",
+    "llvm.ppc.altivec.vcmpequh.p" => "__builtin_altivec_vcmpequh_p",
+    "llvm.ppc.altivec.vcmpequq" => "__builtin_altivec_vcmpequq",
+    "llvm.ppc.altivec.vcmpequq.p" => "__builtin_altivec_vcmpequq_p",
+    "llvm.ppc.altivec.vcmpequw" => "__builtin_altivec_vcmpequw",
+    "llvm.ppc.altivec.vcmpequw.p" => "__builtin_altivec_vcmpequw_p",
+    "llvm.ppc.altivec.vcmpgefp" => "__builtin_altivec_vcmpgefp",
+    "llvm.ppc.altivec.vcmpgefp.p" => "__builtin_altivec_vcmpgefp_p",
+    "llvm.ppc.altivec.vcmpgtfp" => "__builtin_altivec_vcmpgtfp",
+    "llvm.ppc.altivec.vcmpgtfp.p" => "__builtin_altivec_vcmpgtfp_p",
+    "llvm.ppc.altivec.vcmpgtsb" => "__builtin_altivec_vcmpgtsb",
+    "llvm.ppc.altivec.vcmpgtsb.p" => "__builtin_altivec_vcmpgtsb_p",
+    "llvm.ppc.altivec.vcmpgtsd" => "__builtin_altivec_vcmpgtsd",
+    "llvm.ppc.altivec.vcmpgtsd.p" => "__builtin_altivec_vcmpgtsd_p",
+    "llvm.ppc.altivec.vcmpgtsh" => "__builtin_altivec_vcmpgtsh",
+    "llvm.ppc.altivec.vcmpgtsh.p" => "__builtin_altivec_vcmpgtsh_p",
+    "llvm.ppc.altivec.vcmpgtsq" => "__builtin_altivec_vcmpgtsq",
+    "llvm.ppc.altivec.vcmpgtsq.p" => "__builtin_altivec_vcmpgtsq_p",
+    "llvm.ppc.altivec.vcmpgtsw" => "__builtin_altivec_vcmpgtsw",
+    "llvm.ppc.altivec.vcmpgtsw.p" => "__builtin_altivec_vcmpgtsw_p",
+    "llvm.ppc.altivec.vcmpgtub" => "__builtin_altivec_vcmpgtub",
+    "llvm.ppc.altivec.vcmpgtub.p" => "__builtin_altivec_vcmpgtub_p",
+    "llvm.ppc.altivec.vcmpgtud" => "__builtin_altivec_vcmpgtud",
+    "llvm.ppc.altivec.vcmpgtud.p" => "__builtin_altivec_vcmpgtud_p",
+    "llvm.ppc.altivec.vcmpgtuh" => "__builtin_altivec_vcmpgtuh",
+    "llvm.ppc.altivec.vcmpgtuh.p" => "__builtin_altivec_vcmpgtuh_p",
+    "llvm.ppc.altivec.vcmpgtuq" => "__builtin_altivec_vcmpgtuq",
+    "llvm.ppc.altivec.vcmpgtuq.p" => "__builtin_altivec_vcmpgtuq_p",
+    "llvm.ppc.altivec.vcmpgtuw" => "__builtin_altivec_vcmpgtuw",
+    "llvm.ppc.altivec.vcmpgtuw.p" => "__builtin_altivec_vcmpgtuw_p",
+    "llvm.ppc.altivec.vcmpneb" => "__builtin_altivec_vcmpneb",
+    "llvm.ppc.altivec.vcmpneb.p" => "__builtin_altivec_vcmpneb_p",
+    "llvm.ppc.altivec.vcmpneh" => "__builtin_altivec_vcmpneh",
+    "llvm.ppc.altivec.vcmpneh.p" => "__builtin_altivec_vcmpneh_p",
+    "llvm.ppc.altivec.vcmpnew" => "__builtin_altivec_vcmpnew",
+    "llvm.ppc.altivec.vcmpnew.p" => "__builtin_altivec_vcmpnew_p",
+    "llvm.ppc.altivec.vcmpnezb" => "__builtin_altivec_vcmpnezb",
+    "llvm.ppc.altivec.vcmpnezb.p" => "__builtin_altivec_vcmpnezb_p",
+    "llvm.ppc.altivec.vcmpnezh" => "__builtin_altivec_vcmpnezh",
+    "llvm.ppc.altivec.vcmpnezh.p" => "__builtin_altivec_vcmpnezh_p",
+    "llvm.ppc.altivec.vcmpnezw" => "__builtin_altivec_vcmpnezw",
+    "llvm.ppc.altivec.vcmpnezw.p" => "__builtin_altivec_vcmpnezw_p",
+    "llvm.ppc.altivec.vcntmbb" => "__builtin_altivec_vcntmbb",
+    "llvm.ppc.altivec.vcntmbd" => "__builtin_altivec_vcntmbd",
+    "llvm.ppc.altivec.vcntmbh" => "__builtin_altivec_vcntmbh",
+    "llvm.ppc.altivec.vcntmbw" => "__builtin_altivec_vcntmbw",
+    "llvm.ppc.altivec.vctsxs" => "__builtin_altivec_vctsxs",
+    "llvm.ppc.altivec.vctuxs" => "__builtin_altivec_vctuxs",
+    "llvm.ppc.altivec.vctzdm" => "__builtin_altivec_vctzdm",
+    "llvm.ppc.altivec.vctzlsbb" => "__builtin_altivec_vctzlsbb",
+    "llvm.ppc.altivec.vdivesd" => "__builtin_altivec_vdivesd",
+    "llvm.ppc.altivec.vdivesq" => "__builtin_altivec_vdivesq",
+    "llvm.ppc.altivec.vdivesw" => "__builtin_altivec_vdivesw",
+    "llvm.ppc.altivec.vdiveud" => "__builtin_altivec_vdiveud",
+    "llvm.ppc.altivec.vdiveuq" => "__builtin_altivec_vdiveuq",
+    "llvm.ppc.altivec.vdiveuw" => "__builtin_altivec_vdiveuw",
+    "llvm.ppc.altivec.vexpandbm" => "__builtin_altivec_vexpandbm",
+    "llvm.ppc.altivec.vexpanddm" => "__builtin_altivec_vexpanddm",
+    "llvm.ppc.altivec.vexpandhm" => "__builtin_altivec_vexpandhm",
+    "llvm.ppc.altivec.vexpandqm" => "__builtin_altivec_vexpandqm",
+    "llvm.ppc.altivec.vexpandwm" => "__builtin_altivec_vexpandwm",
+    "llvm.ppc.altivec.vexptefp" => "__builtin_altivec_vexptefp",
+    "llvm.ppc.altivec.vextddvlx" => "__builtin_altivec_vextddvlx",
+    "llvm.ppc.altivec.vextddvrx" => "__builtin_altivec_vextddvrx",
+    "llvm.ppc.altivec.vextdubvlx" => "__builtin_altivec_vextdubvlx",
+    "llvm.ppc.altivec.vextdubvrx" => "__builtin_altivec_vextdubvrx",
+    "llvm.ppc.altivec.vextduhvlx" => "__builtin_altivec_vextduhvlx",
+    "llvm.ppc.altivec.vextduhvrx" => "__builtin_altivec_vextduhvrx",
+    "llvm.ppc.altivec.vextduwvlx" => "__builtin_altivec_vextduwvlx",
+    "llvm.ppc.altivec.vextduwvrx" => "__builtin_altivec_vextduwvrx",
+    "llvm.ppc.altivec.vextractbm" => "__builtin_altivec_vextractbm",
+    "llvm.ppc.altivec.vextractdm" => "__builtin_altivec_vextractdm",
+    "llvm.ppc.altivec.vextracthm" => "__builtin_altivec_vextracthm",
+    "llvm.ppc.altivec.vextractqm" => "__builtin_altivec_vextractqm",
+    "llvm.ppc.altivec.vextractwm" => "__builtin_altivec_vextractwm",
+    "llvm.ppc.altivec.vextsb2d" => "__builtin_altivec_vextsb2d",
+    "llvm.ppc.altivec.vextsb2w" => "__builtin_altivec_vextsb2w",
+    "llvm.ppc.altivec.vextsd2q" => "__builtin_altivec_vextsd2q",
+    "llvm.ppc.altivec.vextsh2d" => "__builtin_altivec_vextsh2d",
+    "llvm.ppc.altivec.vextsh2w" => "__builtin_altivec_vextsh2w",
+    "llvm.ppc.altivec.vextsw2d" => "__builtin_altivec_vextsw2d",
+    "llvm.ppc.altivec.vgbbd" => "__builtin_altivec_vgbbd",
+    "llvm.ppc.altivec.vgnb" => "__builtin_altivec_vgnb",
+    "llvm.ppc.altivec.vinsblx" => "__builtin_altivec_vinsblx",
+    "llvm.ppc.altivec.vinsbrx" => "__builtin_altivec_vinsbrx",
+    "llvm.ppc.altivec.vinsbvlx" => "__builtin_altivec_vinsbvlx",
+    "llvm.ppc.altivec.vinsbvrx" => "__builtin_altivec_vinsbvrx",
+    "llvm.ppc.altivec.vinsdlx" => "__builtin_altivec_vinsdlx",
+    "llvm.ppc.altivec.vinsdrx" => "__builtin_altivec_vinsdrx",
+    "llvm.ppc.altivec.vinshlx" => "__builtin_altivec_vinshlx",
+    "llvm.ppc.altivec.vinshrx" => "__builtin_altivec_vinshrx",
+    "llvm.ppc.altivec.vinshvlx" => "__builtin_altivec_vinshvlx",
+    "llvm.ppc.altivec.vinshvrx" => "__builtin_altivec_vinshvrx",
+    "llvm.ppc.altivec.vinswlx" => "__builtin_altivec_vinswlx",
+    "llvm.ppc.altivec.vinswrx" => "__builtin_altivec_vinswrx",
+    "llvm.ppc.altivec.vinswvlx" => "__builtin_altivec_vinswvlx",
+    "llvm.ppc.altivec.vinswvrx" => "__builtin_altivec_vinswvrx",
+    "llvm.ppc.altivec.vlogefp" => "__builtin_altivec_vlogefp",
+    "llvm.ppc.altivec.vmaddfp" => "__builtin_altivec_vmaddfp",
+    "llvm.ppc.altivec.vmaxfp" => "__builtin_altivec_vmaxfp",
+    "llvm.ppc.altivec.vmaxsb" => "__builtin_altivec_vmaxsb",
+    "llvm.ppc.altivec.vmaxsd" => "__builtin_altivec_vmaxsd",
+    "llvm.ppc.altivec.vmaxsh" => "__builtin_altivec_vmaxsh",
+    "llvm.ppc.altivec.vmaxsw" => "__builtin_altivec_vmaxsw",
+    "llvm.ppc.altivec.vmaxub" => "__builtin_altivec_vmaxub",
+    "llvm.ppc.altivec.vmaxud" => "__builtin_altivec_vmaxud",
+    "llvm.ppc.altivec.vmaxuh" => "__builtin_altivec_vmaxuh",
+    "llvm.ppc.altivec.vmaxuw" => "__builtin_altivec_vmaxuw",
+    "llvm.ppc.altivec.vmhaddshs" => "__builtin_altivec_vmhaddshs",
+    "llvm.ppc.altivec.vmhraddshs" => "__builtin_altivec_vmhraddshs",
+    "llvm.ppc.altivec.vminfp" => "__builtin_altivec_vminfp",
+    "llvm.ppc.altivec.vminsb" => "__builtin_altivec_vminsb",
+    "llvm.ppc.altivec.vminsd" => "__builtin_altivec_vminsd",
+    "llvm.ppc.altivec.vminsh" => "__builtin_altivec_vminsh",
+    "llvm.ppc.altivec.vminsw" => "__builtin_altivec_vminsw",
+    "llvm.ppc.altivec.vminub" => "__builtin_altivec_vminub",
+    "llvm.ppc.altivec.vminud" => "__builtin_altivec_vminud",
+    "llvm.ppc.altivec.vminuh" => "__builtin_altivec_vminuh",
+    "llvm.ppc.altivec.vminuw" => "__builtin_altivec_vminuw",
+    "llvm.ppc.altivec.vmladduhm" => "__builtin_altivec_vmladduhm",
+    "llvm.ppc.altivec.vmsumcud" => "__builtin_altivec_vmsumcud",
+    "llvm.ppc.altivec.vmsummbm" => "__builtin_altivec_vmsummbm",
+    "llvm.ppc.altivec.vmsumshm" => "__builtin_altivec_vmsumshm",
+    "llvm.ppc.altivec.vmsumshs" => "__builtin_altivec_vmsumshs",
+    "llvm.ppc.altivec.vmsumubm" => "__builtin_altivec_vmsumubm",
+    "llvm.ppc.altivec.vmsumudm" => "__builtin_altivec_vmsumudm",
+    "llvm.ppc.altivec.vmsumuhm" => "__builtin_altivec_vmsumuhm",
+    "llvm.ppc.altivec.vmsumuhs" => "__builtin_altivec_vmsumuhs",
+    "llvm.ppc.altivec.vmulesb" => "__builtin_altivec_vmulesb",
+    "llvm.ppc.altivec.vmulesd" => "__builtin_altivec_vmulesd",
+    "llvm.ppc.altivec.vmulesh" => "__builtin_altivec_vmulesh",
+    "llvm.ppc.altivec.vmulesw" => "__builtin_altivec_vmulesw",
+    "llvm.ppc.altivec.vmuleub" => "__builtin_altivec_vmuleub",
+    "llvm.ppc.altivec.vmuleud" => "__builtin_altivec_vmuleud",
+    "llvm.ppc.altivec.vmuleuh" => "__builtin_altivec_vmuleuh",
+    "llvm.ppc.altivec.vmuleuw" => "__builtin_altivec_vmuleuw",
+    "llvm.ppc.altivec.vmulhsd" => "__builtin_altivec_vmulhsd",
+    "llvm.ppc.altivec.vmulhsw" => "__builtin_altivec_vmulhsw",
+    "llvm.ppc.altivec.vmulhud" => "__builtin_altivec_vmulhud",
+    "llvm.ppc.altivec.vmulhuw" => "__builtin_altivec_vmulhuw",
+    "llvm.ppc.altivec.vmulosb" => "__builtin_altivec_vmulosb",
+    "llvm.ppc.altivec.vmulosd" => "__builtin_altivec_vmulosd",
+    "llvm.ppc.altivec.vmulosh" => "__builtin_altivec_vmulosh",
+    "llvm.ppc.altivec.vmulosw" => "__builtin_altivec_vmulosw",
+    "llvm.ppc.altivec.vmuloub" => "__builtin_altivec_vmuloub",
+    "llvm.ppc.altivec.vmuloud" => "__builtin_altivec_vmuloud",
+    "llvm.ppc.altivec.vmulouh" => "__builtin_altivec_vmulouh",
+    "llvm.ppc.altivec.vmulouw" => "__builtin_altivec_vmulouw",
+    "llvm.ppc.altivec.vnmsubfp" => "__builtin_altivec_vnmsubfp",
+    "llvm.ppc.altivec.vpdepd" => "__builtin_altivec_vpdepd",
+    "llvm.ppc.altivec.vperm" => "__builtin_altivec_vperm_4si",
+    "llvm.ppc.altivec.vpextd" => "__builtin_altivec_vpextd",
+    "llvm.ppc.altivec.vpkpx" => "__builtin_altivec_vpkpx",
+    "llvm.ppc.altivec.vpksdss" => "__builtin_altivec_vpksdss",
+    "llvm.ppc.altivec.vpksdus" => "__builtin_altivec_vpksdus",
+    "llvm.ppc.altivec.vpkshss" => "__builtin_altivec_vpkshss",
+    "llvm.ppc.altivec.vpkshus" => "__builtin_altivec_vpkshus",
+    "llvm.ppc.altivec.vpkswss" => "__builtin_altivec_vpkswss",
+    "llvm.ppc.altivec.vpkswus" => "__builtin_altivec_vpkswus",
+    "llvm.ppc.altivec.vpkudus" => "__builtin_altivec_vpkudus",
+    "llvm.ppc.altivec.vpkuhus" => "__builtin_altivec_vpkuhus",
+    "llvm.ppc.altivec.vpkuwus" => "__builtin_altivec_vpkuwus",
+    "llvm.ppc.altivec.vprtybd" => "__builtin_altivec_vprtybd",
+    "llvm.ppc.altivec.vprtybq" => "__builtin_altivec_vprtybq",
+    "llvm.ppc.altivec.vprtybw" => "__builtin_altivec_vprtybw",
+    "llvm.ppc.altivec.vrefp" => "__builtin_altivec_vrefp",
+    "llvm.ppc.altivec.vrfim" => "__builtin_altivec_vrfim",
+    "llvm.ppc.altivec.vrfin" => "__builtin_altivec_vrfin",
+    "llvm.ppc.altivec.vrfip" => "__builtin_altivec_vrfip",
+    "llvm.ppc.altivec.vrfiz" => "__builtin_altivec_vrfiz",
+    "llvm.ppc.altivec.vrlb" => "__builtin_altivec_vrlb",
+    "llvm.ppc.altivec.vrld" => "__builtin_altivec_vrld",
+    "llvm.ppc.altivec.vrldmi" => "__builtin_altivec_vrldmi",
+    "llvm.ppc.altivec.vrldnm" => "__builtin_altivec_vrldnm",
+    "llvm.ppc.altivec.vrlh" => "__builtin_altivec_vrlh",
+    "llvm.ppc.altivec.vrlqmi" => "__builtin_altivec_vrlqmi",
+    "llvm.ppc.altivec.vrlqnm" => "__builtin_altivec_vrlqnm",
+    "llvm.ppc.altivec.vrlw" => "__builtin_altivec_vrlw",
+    "llvm.ppc.altivec.vrlwmi" => "__builtin_altivec_vrlwmi",
+    "llvm.ppc.altivec.vrlwnm" => "__builtin_altivec_vrlwnm",
+    "llvm.ppc.altivec.vrsqrtefp" => "__builtin_altivec_vrsqrtefp",
+    "llvm.ppc.altivec.vsel" => "__builtin_altivec_vsel_4si",
+    "llvm.ppc.altivec.vsl" => "__builtin_altivec_vsl",
+    "llvm.ppc.altivec.vslb" => "__builtin_altivec_vslb",
+    "llvm.ppc.altivec.vsldbi" => "__builtin_altivec_vsldbi",
+    "llvm.ppc.altivec.vslh" => "__builtin_altivec_vslh",
+    "llvm.ppc.altivec.vslo" => "__builtin_altivec_vslo",
+    "llvm.ppc.altivec.vslv" => "__builtin_altivec_vslv",
+    "llvm.ppc.altivec.vslw" => "__builtin_altivec_vslw",
+    "llvm.ppc.altivec.vsr" => "__builtin_altivec_vsr",
+    "llvm.ppc.altivec.vsrab" => "__builtin_altivec_vsrab",
+    "llvm.ppc.altivec.vsrah" => "__builtin_altivec_vsrah",
+    "llvm.ppc.altivec.vsraw" => "__builtin_altivec_vsraw",
+    "llvm.ppc.altivec.vsrb" => "__builtin_altivec_vsrb",
+    "llvm.ppc.altivec.vsrdbi" => "__builtin_altivec_vsrdbi",
+    "llvm.ppc.altivec.vsrh" => "__builtin_altivec_vsrh",
+    "llvm.ppc.altivec.vsro" => "__builtin_altivec_vsro",
+    "llvm.ppc.altivec.vsrv" => "__builtin_altivec_vsrv",
+    "llvm.ppc.altivec.vsrw" => "__builtin_altivec_vsrw",
+    "llvm.ppc.altivec.vstribl" => "__builtin_altivec_vstribl",
+    "llvm.ppc.altivec.vstribl.p" => "__builtin_altivec_vstribl_p",
+    "llvm.ppc.altivec.vstribr" => "__builtin_altivec_vstribr",
+    "llvm.ppc.altivec.vstribr.p" => "__builtin_altivec_vstribr_p",
+    "llvm.ppc.altivec.vstrihl" => "__builtin_altivec_vstrihl",
+    "llvm.ppc.altivec.vstrihl.p" => "__builtin_altivec_vstrihl_p",
+    "llvm.ppc.altivec.vstrihr" => "__builtin_altivec_vstrihr",
+    "llvm.ppc.altivec.vstrihr.p" => "__builtin_altivec_vstrihr_p",
+    "llvm.ppc.altivec.vsubcuq" => "__builtin_altivec_vsubcuq",
+    "llvm.ppc.altivec.vsubcuw" => "__builtin_altivec_vsubcuw",
+    "llvm.ppc.altivec.vsubecuq" => "__builtin_altivec_vsubecuq",
+    "llvm.ppc.altivec.vsubeuqm" => "__builtin_altivec_vsubeuqm",
+    "llvm.ppc.altivec.vsubsbs" => "__builtin_altivec_vsubsbs",
+    "llvm.ppc.altivec.vsubshs" => "__builtin_altivec_vsubshs",
+    "llvm.ppc.altivec.vsubsws" => "__builtin_altivec_vsubsws",
+    "llvm.ppc.altivec.vsububs" => "__builtin_altivec_vsububs",
+    "llvm.ppc.altivec.vsubuhs" => "__builtin_altivec_vsubuhs",
+    "llvm.ppc.altivec.vsubuws" => "__builtin_altivec_vsubuws",
+    "llvm.ppc.altivec.vsum2sws" => "__builtin_altivec_vsum2sws",
+    "llvm.ppc.altivec.vsum4sbs" => "__builtin_altivec_vsum4sbs",
+    "llvm.ppc.altivec.vsum4shs" => "__builtin_altivec_vsum4shs",
+    "llvm.ppc.altivec.vsum4ubs" => "__builtin_altivec_vsum4ubs",
+    "llvm.ppc.altivec.vsumsws" => "__builtin_altivec_vsumsws",
+    "llvm.ppc.altivec.vupkhpx" => "__builtin_altivec_vupkhpx",
+    "llvm.ppc.altivec.vupkhsb" => "__builtin_altivec_vupkhsb",
+    "llvm.ppc.altivec.vupkhsh" => "__builtin_altivec_vupkhsh",
+    "llvm.ppc.altivec.vupkhsw" => "__builtin_altivec_vupkhsw",
+    "llvm.ppc.altivec.vupklpx" => "__builtin_altivec_vupklpx",
+    "llvm.ppc.altivec.vupklsb" => "__builtin_altivec_vupklsb",
+    "llvm.ppc.altivec.vupklsh" => "__builtin_altivec_vupklsh",
+    "llvm.ppc.altivec.vupklsw" => "__builtin_altivec_vupklsw",
+    "llvm.ppc.bcdadd" => "__builtin_ppc_bcdadd",
+    "llvm.ppc.bcdadd.p" => "__builtin_ppc_bcdadd_p",
+    "llvm.ppc.bcdsub" => "__builtin_ppc_bcdsub",
+    "llvm.ppc.bcdsub.p" => "__builtin_ppc_bcdsub_p",
+    "llvm.ppc.bpermd" => "__builtin_bpermd",
+    "llvm.ppc.cbcdtd" => "__builtin_cbcdtd",
+    "llvm.ppc.cbcdtdd" => "__builtin_ppc_cbcdtd",
+    "llvm.ppc.cdtbcd" => "__builtin_cdtbcd",
+    "llvm.ppc.cdtbcdd" => "__builtin_ppc_cdtbcd",
+    "llvm.ppc.cfuged" => "__builtin_cfuged",
+    "llvm.ppc.cmpeqb" => "__builtin_ppc_cmpeqb",
+    "llvm.ppc.cmprb" => "__builtin_ppc_cmprb",
+    "llvm.ppc.cntlzdm" => "__builtin_cntlzdm",
+    "llvm.ppc.cnttzdm" => "__builtin_cnttzdm",
+    "llvm.ppc.compare.exp.eq" => "__builtin_ppc_compare_exp_eq",
+    "llvm.ppc.compare.exp.gt" => "__builtin_ppc_compare_exp_gt",
+    "llvm.ppc.compare.exp.lt" => "__builtin_ppc_compare_exp_lt",
+    "llvm.ppc.compare.exp.uo" => "__builtin_ppc_compare_exp_uo",
+    "llvm.ppc.darn" => "__builtin_darn",
+    "llvm.ppc.darn32" => "__builtin_darn_32",
+    "llvm.ppc.darnraw" => "__builtin_darn_raw",
+    "llvm.ppc.dcbf" => "__builtin_dcbf",
+    "llvm.ppc.dcbfl" => "__builtin_ppc_dcbfl",
+    "llvm.ppc.dcbflp" => "__builtin_ppc_dcbflp",
+    "llvm.ppc.dcbst" => "__builtin_ppc_dcbst",
+    "llvm.ppc.dcbt" => "__builtin_ppc_dcbt",
+    "llvm.ppc.dcbtst" => "__builtin_ppc_dcbtst",
+    "llvm.ppc.dcbtstt" => "__builtin_ppc_dcbtstt",
+    "llvm.ppc.dcbtt" => "__builtin_ppc_dcbtt",
+    "llvm.ppc.dcbz" => "__builtin_ppc_dcbz",
+    "llvm.ppc.divde" => "__builtin_divde",
+    "llvm.ppc.divdeu" => "__builtin_divdeu",
+    "llvm.ppc.divf128.round.to.odd" => "__builtin_divf128_round_to_odd",
+    "llvm.ppc.divwe" => "__builtin_divwe",
+    "llvm.ppc.divweu" => "__builtin_divweu",
+    "llvm.ppc.eieio" => "__builtin_ppc_eieio",
+    "llvm.ppc.extract.exp" => "__builtin_ppc_extract_exp",
+    "llvm.ppc.extract.sig" => "__builtin_ppc_extract_sig",
+    "llvm.ppc.fcfid" => "__builtin_ppc_fcfid",
+    "llvm.ppc.fcfud" => "__builtin_ppc_fcfud",
+    "llvm.ppc.fctid" => "__builtin_ppc_fctid",
+    "llvm.ppc.fctidz" => "__builtin_ppc_fctidz",
+    "llvm.ppc.fctiw" => "__builtin_ppc_fctiw",
+    "llvm.ppc.fctiwz" => "__builtin_ppc_fctiwz",
+    "llvm.ppc.fctudz" => "__builtin_ppc_fctudz",
+    "llvm.ppc.fctuwz" => "__builtin_ppc_fctuwz",
+    "llvm.ppc.fence" => "__builtin_ppc_fence",
+    "llvm.ppc.fmaf128.round.to.odd" => "__builtin_fmaf128_round_to_odd",
+    "llvm.ppc.fmsub" => "__builtin_ppc_fmsub",
+    "llvm.ppc.fmsubs" => "__builtin_ppc_fmsubs",
+    "llvm.ppc.fnabs" => "__builtin_ppc_fnabs",
+    "llvm.ppc.fnabss" => "__builtin_ppc_fnabss",
+    "llvm.ppc.fnmadd" => "__builtin_ppc_fnmadd",
+    "llvm.ppc.fnmadds" => "__builtin_ppc_fnmadds",
+    "llvm.ppc.fre" => "__builtin_ppc_fre",
+    "llvm.ppc.fres" => "__builtin_ppc_fres",
+    "llvm.ppc.frsqrte" => "__builtin_ppc_frsqrte",
+    "llvm.ppc.frsqrtes" => "__builtin_ppc_frsqrtes",
+    "llvm.ppc.fsel" => "__builtin_ppc_fsel",
+    "llvm.ppc.fsels" => "__builtin_ppc_fsels",
+    "llvm.ppc.get.texasr" => "__builtin_get_texasr",
+    "llvm.ppc.get.texasru" => "__builtin_get_texasru",
+    "llvm.ppc.get.tfhar" => "__builtin_get_tfhar",
+    "llvm.ppc.get.tfiar" => "__builtin_get_tfiar",
+    "llvm.ppc.icbt" => "__builtin_ppc_icbt",
+    "llvm.ppc.insert.exp" => "__builtin_ppc_insert_exp",
+    "llvm.ppc.iospace.eieio" => "__builtin_ppc_iospace_eieio",
+    "llvm.ppc.iospace.lwsync" => "__builtin_ppc_iospace_lwsync",
+    "llvm.ppc.iospace.sync" => "__builtin_ppc_iospace_sync",
+    "llvm.ppc.isync" => "__builtin_ppc_isync",
+    "llvm.ppc.load4r" => "__builtin_ppc_load4r",
+    "llvm.ppc.load8r" => "__builtin_ppc_load8r",
+    "llvm.ppc.lwsync" => "__builtin_ppc_lwsync",
+    "llvm.ppc.maddhd" => "__builtin_ppc_maddhd",
+    "llvm.ppc.maddhdu" => "__builtin_ppc_maddhdu",
+    "llvm.ppc.maddld" => "__builtin_ppc_maddld",
+    "llvm.ppc.mffsl" => "__builtin_ppc_mffsl",
+    "llvm.ppc.mfmsr" => "__builtin_ppc_mfmsr",
+    "llvm.ppc.mftbu" => "__builtin_ppc_mftbu",
+    "llvm.ppc.mtfsb0" => "__builtin_ppc_mtfsb0",
+    "llvm.ppc.mtfsb1" => "__builtin_ppc_mtfsb1",
+    "llvm.ppc.mtfsfi" => "__builtin_ppc_mtfsfi",
+    "llvm.ppc.mtmsr" => "__builtin_ppc_mtmsr",
+    "llvm.ppc.mulf128.round.to.odd" => "__builtin_mulf128_round_to_odd",
+    "llvm.ppc.mulhd" => "__builtin_ppc_mulhd",
+    "llvm.ppc.mulhdu" => "__builtin_ppc_mulhdu",
+    "llvm.ppc.mulhw" => "__builtin_ppc_mulhw",
+    "llvm.ppc.mulhwu" => "__builtin_ppc_mulhwu",
+    "llvm.ppc.pack.longdouble" => "__builtin_pack_longdouble",
+    "llvm.ppc.pdepd" => "__builtin_pdepd",
+    "llvm.ppc.pextd" => "__builtin_pextd",
+    "llvm.ppc.qpx.qvfabs" => "__builtin_qpx_qvfabs",
+    "llvm.ppc.qpx.qvfadd" => "__builtin_qpx_qvfadd",
+    "llvm.ppc.qpx.qvfadds" => "__builtin_qpx_qvfadds",
+    "llvm.ppc.qpx.qvfcfid" => "__builtin_qpx_qvfcfid",
+    "llvm.ppc.qpx.qvfcfids" => "__builtin_qpx_qvfcfids",
+    "llvm.ppc.qpx.qvfcfidu" => "__builtin_qpx_qvfcfidu",
+    "llvm.ppc.qpx.qvfcfidus" => "__builtin_qpx_qvfcfidus",
+    "llvm.ppc.qpx.qvfcmpeq" => "__builtin_qpx_qvfcmpeq",
+    "llvm.ppc.qpx.qvfcmpgt" => "__builtin_qpx_qvfcmpgt",
+    "llvm.ppc.qpx.qvfcmplt" => "__builtin_qpx_qvfcmplt",
+    "llvm.ppc.qpx.qvfcpsgn" => "__builtin_qpx_qvfcpsgn",
+    "llvm.ppc.qpx.qvfctid" => "__builtin_qpx_qvfctid",
+    "llvm.ppc.qpx.qvfctidu" => "__builtin_qpx_qvfctidu",
+    "llvm.ppc.qpx.qvfctiduz" => "__builtin_qpx_qvfctiduz",
+    "llvm.ppc.qpx.qvfctidz" => "__builtin_qpx_qvfctidz",
+    "llvm.ppc.qpx.qvfctiw" => "__builtin_qpx_qvfctiw",
+    "llvm.ppc.qpx.qvfctiwu" => "__builtin_qpx_qvfctiwu",
+    "llvm.ppc.qpx.qvfctiwuz" => "__builtin_qpx_qvfctiwuz",
+    "llvm.ppc.qpx.qvfctiwz" => "__builtin_qpx_qvfctiwz",
+    "llvm.ppc.qpx.qvflogical" => "__builtin_qpx_qvflogical",
+    "llvm.ppc.qpx.qvfmadd" => "__builtin_qpx_qvfmadd",
+    "llvm.ppc.qpx.qvfmadds" => "__builtin_qpx_qvfmadds",
+    "llvm.ppc.qpx.qvfmsub" => "__builtin_qpx_qvfmsub",
+    "llvm.ppc.qpx.qvfmsubs" => "__builtin_qpx_qvfmsubs",
+    "llvm.ppc.qpx.qvfmul" => "__builtin_qpx_qvfmul",
+    "llvm.ppc.qpx.qvfmuls" => "__builtin_qpx_qvfmuls",
+    "llvm.ppc.qpx.qvfnabs" => "__builtin_qpx_qvfnabs",
+    "llvm.ppc.qpx.qvfneg" => "__builtin_qpx_qvfneg",
+    "llvm.ppc.qpx.qvfnmadd" => "__builtin_qpx_qvfnmadd",
+    "llvm.ppc.qpx.qvfnmadds" => "__builtin_qpx_qvfnmadds",
+    "llvm.ppc.qpx.qvfnmsub" => "__builtin_qpx_qvfnmsub",
+    "llvm.ppc.qpx.qvfnmsubs" => "__builtin_qpx_qvfnmsubs",
+    "llvm.ppc.qpx.qvfperm" => "__builtin_qpx_qvfperm",
+    "llvm.ppc.qpx.qvfre" => "__builtin_qpx_qvfre",
+    "llvm.ppc.qpx.qvfres" => "__builtin_qpx_qvfres",
+    "llvm.ppc.qpx.qvfrim" => "__builtin_qpx_qvfrim",
+    "llvm.ppc.qpx.qvfrin" => "__builtin_qpx_qvfrin",
+    "llvm.ppc.qpx.qvfrip" => "__builtin_qpx_qvfrip",
+    "llvm.ppc.qpx.qvfriz" => "__builtin_qpx_qvfriz",
+    "llvm.ppc.qpx.qvfrsp" => "__builtin_qpx_qvfrsp",
+    "llvm.ppc.qpx.qvfrsqrte" => "__builtin_qpx_qvfrsqrte",
+    "llvm.ppc.qpx.qvfrsqrtes" => "__builtin_qpx_qvfrsqrtes",
+    "llvm.ppc.qpx.qvfsel" => "__builtin_qpx_qvfsel",
+    "llvm.ppc.qpx.qvfsub" => "__builtin_qpx_qvfsub",
+    "llvm.ppc.qpx.qvfsubs" => "__builtin_qpx_qvfsubs",
+    "llvm.ppc.qpx.qvftstnan" => "__builtin_qpx_qvftstnan",
+    "llvm.ppc.qpx.qvfxmadd" => "__builtin_qpx_qvfxmadd",
+    "llvm.ppc.qpx.qvfxmadds" => "__builtin_qpx_qvfxmadds",
+    "llvm.ppc.qpx.qvfxmul" => "__builtin_qpx_qvfxmul",
+    "llvm.ppc.qpx.qvfxmuls" => "__builtin_qpx_qvfxmuls",
+    "llvm.ppc.qpx.qvfxxcpnmadd" => "__builtin_qpx_qvfxxcpnmadd",
+    "llvm.ppc.qpx.qvfxxcpnmadds" => "__builtin_qpx_qvfxxcpnmadds",
+    "llvm.ppc.qpx.qvfxxmadd" => "__builtin_qpx_qvfxxmadd",
+    "llvm.ppc.qpx.qvfxxmadds" => "__builtin_qpx_qvfxxmadds",
+    "llvm.ppc.qpx.qvfxxnpmadd" => "__builtin_qpx_qvfxxnpmadd",
+    "llvm.ppc.qpx.qvfxxnpmadds" => "__builtin_qpx_qvfxxnpmadds",
+    "llvm.ppc.qpx.qvgpci" => "__builtin_qpx_qvgpci",
+    "llvm.ppc.qpx.qvlfcd" => "__builtin_qpx_qvlfcd",
+    "llvm.ppc.qpx.qvlfcda" => "__builtin_qpx_qvlfcda",
+    "llvm.ppc.qpx.qvlfcs" => "__builtin_qpx_qvlfcs",
+    "llvm.ppc.qpx.qvlfcsa" => "__builtin_qpx_qvlfcsa",
+    "llvm.ppc.qpx.qvlfd" => "__builtin_qpx_qvlfd",
+    "llvm.ppc.qpx.qvlfda" => "__builtin_qpx_qvlfda",
+    "llvm.ppc.qpx.qvlfiwa" => "__builtin_qpx_qvlfiwa",
+    "llvm.ppc.qpx.qvlfiwaa" => "__builtin_qpx_qvlfiwaa",
+    "llvm.ppc.qpx.qvlfiwz" => "__builtin_qpx_qvlfiwz",
+    "llvm.ppc.qpx.qvlfiwza" => "__builtin_qpx_qvlfiwza",
+    "llvm.ppc.qpx.qvlfs" => "__builtin_qpx_qvlfs",
+    "llvm.ppc.qpx.qvlfsa" => "__builtin_qpx_qvlfsa",
+    "llvm.ppc.qpx.qvlpcld" => "__builtin_qpx_qvlpcld",
+    "llvm.ppc.qpx.qvlpcls" => "__builtin_qpx_qvlpcls",
+    "llvm.ppc.qpx.qvlpcrd" => "__builtin_qpx_qvlpcrd",
+    "llvm.ppc.qpx.qvlpcrs" => "__builtin_qpx_qvlpcrs",
+    "llvm.ppc.qpx.qvstfcd" => "__builtin_qpx_qvstfcd",
+    "llvm.ppc.qpx.qvstfcda" => "__builtin_qpx_qvstfcda",
+    "llvm.ppc.qpx.qvstfcs" => "__builtin_qpx_qvstfcs",
+    "llvm.ppc.qpx.qvstfcsa" => "__builtin_qpx_qvstfcsa",
+    "llvm.ppc.qpx.qvstfd" => "__builtin_qpx_qvstfd",
+    "llvm.ppc.qpx.qvstfda" => "__builtin_qpx_qvstfda",
+    "llvm.ppc.qpx.qvstfiw" => "__builtin_qpx_qvstfiw",
+    "llvm.ppc.qpx.qvstfiwa" => "__builtin_qpx_qvstfiwa",
+    "llvm.ppc.qpx.qvstfs" => "__builtin_qpx_qvstfs",
+    "llvm.ppc.qpx.qvstfsa" => "__builtin_qpx_qvstfsa",
+    "llvm.ppc.readflm" => "__builtin_readflm",
+    "llvm.ppc.rlwimi" => "__builtin_ppc_rlwimi",
+    "llvm.ppc.rlwnm" => "__builtin_ppc_rlwnm",
+    "llvm.ppc.scalar.extract.expq" => "__builtin_vsx_scalar_extract_expq",
+    "llvm.ppc.scalar.insert.exp.qp" => "__builtin_vsx_scalar_insert_exp_qp",
+    "llvm.ppc.set.texasr" => "__builtin_set_texasr",
+    "llvm.ppc.set.texasru" => "__builtin_set_texasru",
+    "llvm.ppc.set.tfhar" => "__builtin_set_tfhar",
+    "llvm.ppc.set.tfiar" => "__builtin_set_tfiar",
+    "llvm.ppc.setb" => "__builtin_ppc_setb",
+    "llvm.ppc.setflm" => "__builtin_setflm",
+    "llvm.ppc.setrnd" => "__builtin_setrnd",
+    "llvm.ppc.sqrtf128.round.to.odd" => "__builtin_sqrtf128_round_to_odd",
+    "llvm.ppc.stbcx" => "__builtin_ppc_stbcx",
+    "llvm.ppc.stdcx" => "__builtin_ppc_stdcx",
+    "llvm.ppc.stfiw" => "__builtin_ppc_stfiw",
+    "llvm.ppc.store2r" => "__builtin_ppc_store2r",
+    "llvm.ppc.store4r" => "__builtin_ppc_store4r",
+    "llvm.ppc.store8r" => "__builtin_ppc_store8r",
+    "llvm.ppc.stwcx" => "__builtin_ppc_stwcx",
+    "llvm.ppc.subf128.round.to.odd" => "__builtin_subf128_round_to_odd",
+    "llvm.ppc.sync" => "__builtin_ppc_sync",
+    "llvm.ppc.tabort" => "__builtin_tabort",
+    "llvm.ppc.tabortdc" => "__builtin_tabortdc",
+    "llvm.ppc.tabortdci" => "__builtin_tabortdci",
+    "llvm.ppc.tabortwc" => "__builtin_tabortwc",
+    "llvm.ppc.tabortwci" => "__builtin_tabortwci",
+    "llvm.ppc.tbegin" => "__builtin_tbegin",
+    "llvm.ppc.tcheck" => "__builtin_tcheck",
+    "llvm.ppc.tdw" => "__builtin_ppc_tdw",
+    "llvm.ppc.tend" => "__builtin_tend",
+    "llvm.ppc.tendall" => "__builtin_tendall",
+    "llvm.ppc.trap" => "__builtin_ppc_trap",
+    "llvm.ppc.trapd" => "__builtin_ppc_trapd",
+    "llvm.ppc.trechkpt" => "__builtin_trechkpt",
+    "llvm.ppc.treclaim" => "__builtin_treclaim",
+    "llvm.ppc.tresume" => "__builtin_tresume",
+    "llvm.ppc.truncf128.round.to.odd" => "__builtin_truncf128_round_to_odd",
+    "llvm.ppc.tsr" => "__builtin_tsr",
+    "llvm.ppc.tsuspend" => "__builtin_tsuspend",
+    "llvm.ppc.ttest" => "__builtin_ttest",
+    "llvm.ppc.tw" => "__builtin_ppc_tw",
+    "llvm.ppc.unpack.longdouble" => "__builtin_unpack_longdouble",
+    "llvm.ppc.vsx.xsmaxdp" => "__builtin_vsx_xsmaxdp",
+    "llvm.ppc.vsx.xsmindp" => "__builtin_vsx_xsmindp",
+    "llvm.ppc.vsx.xvcmpeqdp" => "__builtin_vsx_xvcmpeqdp",
+    "llvm.ppc.vsx.xvcmpeqdp.p" => "__builtin_vsx_xvcmpeqdp_p",
+    "llvm.ppc.vsx.xvcmpeqsp" => "__builtin_vsx_xvcmpeqsp",
+    "llvm.ppc.vsx.xvcmpeqsp.p" => "__builtin_vsx_xvcmpeqsp_p",
+    "llvm.ppc.vsx.xvcmpgedp" => "__builtin_vsx_xvcmpgedp",
+    "llvm.ppc.vsx.xvcmpgedp.p" => "__builtin_vsx_xvcmpgedp_p",
+    "llvm.ppc.vsx.xvcmpgesp" => "__builtin_vsx_xvcmpgesp",
+    "llvm.ppc.vsx.xvcmpgesp.p" => "__builtin_vsx_xvcmpgesp_p",
+    "llvm.ppc.vsx.xvcmpgtdp" => "__builtin_vsx_xvcmpgtdp",
+    "llvm.ppc.vsx.xvcmpgtdp.p" => "__builtin_vsx_xvcmpgtdp_p",
+    "llvm.ppc.vsx.xvcmpgtsp" => "__builtin_vsx_xvcmpgtsp",
+    "llvm.ppc.vsx.xvcmpgtsp.p" => "__builtin_vsx_xvcmpgtsp_p",
+    "llvm.ppc.vsx.xvcvbf16spn" => "__builtin_vsx_xvcvbf16spn",
+    "llvm.ppc.vsx.xvcvdpsp" => "__builtin_vsx_xvcvdpsp",
+    "llvm.ppc.vsx.xvcvdpsxws" => "__builtin_vsx_xvcvdpsxws",
+    "llvm.ppc.vsx.xvcvdpuxws" => "__builtin_vsx_xvcvdpuxws",
+    "llvm.ppc.vsx.xvcvhpsp" => "__builtin_vsx_xvcvhpsp",
+    "llvm.ppc.vsx.xvcvspbf16" => "__builtin_vsx_xvcvspbf16",
+    "llvm.ppc.vsx.xvcvspdp" => "__builtin_vsx_xvcvspdp",
+    "llvm.ppc.vsx.xvcvsphp" => "__builtin_vsx_xvcvsphp",
+    "llvm.ppc.vsx.xvcvspsxds" => "__builtin_vsx_xvcvspsxds",
+    "llvm.ppc.vsx.xvcvspuxds" => "__builtin_vsx_xvcvspuxds",
+    "llvm.ppc.vsx.xvcvsxdsp" => "__builtin_vsx_xvcvsxdsp",
+    "llvm.ppc.vsx.xvcvsxwdp" => "__builtin_vsx_xvcvsxwdp",
+    "llvm.ppc.vsx.xvcvuxdsp" => "__builtin_vsx_xvcvuxdsp",
+    "llvm.ppc.vsx.xvcvuxwdp" => "__builtin_vsx_xvcvuxwdp",
+    "llvm.ppc.vsx.xvdivdp" => "__builtin_vsx_xvdivdp",
+    "llvm.ppc.vsx.xvdivsp" => "__builtin_vsx_xvdivsp",
+    "llvm.ppc.vsx.xviexpdp" => "__builtin_vsx_xviexpdp",
+    "llvm.ppc.vsx.xviexpsp" => "__builtin_vsx_xviexpsp",
+    "llvm.ppc.vsx.xvmaxdp" => "__builtin_vsx_xvmaxdp",
+    "llvm.ppc.vsx.xvmaxsp" => "__builtin_vsx_xvmaxsp",
+    "llvm.ppc.vsx.xvmindp" => "__builtin_vsx_xvmindp",
+    "llvm.ppc.vsx.xvminsp" => "__builtin_vsx_xvminsp",
+    "llvm.ppc.vsx.xvredp" => "__builtin_vsx_xvredp",
+    "llvm.ppc.vsx.xvresp" => "__builtin_vsx_xvresp",
+    "llvm.ppc.vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp",
+    "llvm.ppc.vsx.xvrsqrtesp" => "__builtin_vsx_xvrsqrtesp",
+    "llvm.ppc.vsx.xvtdivdp" => "__builtin_vsx_xvtdivdp",
+    "llvm.ppc.vsx.xvtdivsp" => "__builtin_vsx_xvtdivsp",
+    "llvm.ppc.vsx.xvtlsbb" => "__builtin_vsx_xvtlsbb",
+    "llvm.ppc.vsx.xvtsqrtdp" => "__builtin_vsx_xvtsqrtdp",
+    "llvm.ppc.vsx.xvtsqrtsp" => "__builtin_vsx_xvtsqrtsp",
+    "llvm.ppc.vsx.xvtstdcdp" => "__builtin_vsx_xvtstdcdp",
+    "llvm.ppc.vsx.xvtstdcsp" => "__builtin_vsx_xvtstdcsp",
+    "llvm.ppc.vsx.xvxexpdp" => "__builtin_vsx_xvxexpdp",
+    "llvm.ppc.vsx.xvxexpsp" => "__builtin_vsx_xvxexpsp",
+    "llvm.ppc.vsx.xvxsigdp" => "__builtin_vsx_xvxsigdp",
+    "llvm.ppc.vsx.xvxsigsp" => "__builtin_vsx_xvxsigsp",
+    "llvm.ppc.vsx.xxblendvb" => "__builtin_vsx_xxblendvb",
+    "llvm.ppc.vsx.xxblendvd" => "__builtin_vsx_xxblendvd",
+    "llvm.ppc.vsx.xxblendvh" => "__builtin_vsx_xxblendvh",
+    "llvm.ppc.vsx.xxblendvw" => "__builtin_vsx_xxblendvw",
+    "llvm.ppc.vsx.xxeval" => "__builtin_vsx_xxeval",
+    "llvm.ppc.vsx.xxextractuw" => "__builtin_vsx_xxextractuw",
+    "llvm.ppc.vsx.xxgenpcvbm" => "__builtin_vsx_xxgenpcvbm",
+    "llvm.ppc.vsx.xxgenpcvdm" => "__builtin_vsx_xxgenpcvdm",
+    "llvm.ppc.vsx.xxgenpcvhm" => "__builtin_vsx_xxgenpcvhm",
+    "llvm.ppc.vsx.xxgenpcvwm" => "__builtin_vsx_xxgenpcvwm",
+    "llvm.ppc.vsx.xxinsertw" => "__builtin_vsx_xxinsertw",
+    "llvm.ppc.vsx.xxleqv" => "__builtin_vsx_xxleqv",
+    "llvm.ppc.vsx.xxpermx" => "__builtin_vsx_xxpermx",
+    // ptx
+    "llvm.ptx.bar.sync" => "__builtin_ptx_bar_sync",
+    "llvm.ptx.read.clock" => "__builtin_ptx_read_clock",
+    "llvm.ptx.read.clock64" => "__builtin_ptx_read_clock64",
+    "llvm.ptx.read.gridid" => "__builtin_ptx_read_gridid",
+    "llvm.ptx.read.laneid" => "__builtin_ptx_read_laneid",
+    "llvm.ptx.read.lanemask.eq" => "__builtin_ptx_read_lanemask_eq",
+    "llvm.ptx.read.lanemask.ge" => "__builtin_ptx_read_lanemask_ge",
+    "llvm.ptx.read.lanemask.gt" => "__builtin_ptx_read_lanemask_gt",
+    "llvm.ptx.read.lanemask.le" => "__builtin_ptx_read_lanemask_le",
+    "llvm.ptx.read.lanemask.lt" => "__builtin_ptx_read_lanemask_lt",
+    "llvm.ptx.read.nsmid" => "__builtin_ptx_read_nsmid",
+    "llvm.ptx.read.nwarpid" => "__builtin_ptx_read_nwarpid",
+    "llvm.ptx.read.pm0" => "__builtin_ptx_read_pm0",
+    "llvm.ptx.read.pm1" => "__builtin_ptx_read_pm1",
+    "llvm.ptx.read.pm2" => "__builtin_ptx_read_pm2",
+    "llvm.ptx.read.pm3" => "__builtin_ptx_read_pm3",
+    "llvm.ptx.read.smid" => "__builtin_ptx_read_smid",
+    "llvm.ptx.read.warpid" => "__builtin_ptx_read_warpid",
+    // r600
+    "llvm.r600.group.barrier" => "__builtin_r600_group_barrier",
+    "llvm.r600.implicitarg.ptr" => "__builtin_r600_implicitarg_ptr",
+    "llvm.r600.rat.store.typed" => "__builtin_r600_rat_store_typed",
+    "llvm.r600.read.global.size.x" => "__builtin_r600_read_global_size_x",
+    "llvm.r600.read.global.size.y" => "__builtin_r600_read_global_size_y",
+    "llvm.r600.read.global.size.z" => "__builtin_r600_read_global_size_z",
+    "llvm.r600.read.ngroups.x" => "__builtin_r600_read_ngroups_x",
+    "llvm.r600.read.ngroups.y" => "__builtin_r600_read_ngroups_y",
+    "llvm.r600.read.ngroups.z" => "__builtin_r600_read_ngroups_z",
+    "llvm.r600.read.tgid.x" => "__builtin_r600_read_tgid_x",
+    "llvm.r600.read.tgid.y" => "__builtin_r600_read_tgid_y",
+    "llvm.r600.read.tgid.z" => "__builtin_r600_read_tgid_z",
+    // riscv
+    "llvm.riscv.aes32dsi" => "__builtin_riscv_aes32dsi",
+    "llvm.riscv.aes32dsmi" => "__builtin_riscv_aes32dsmi",
+    "llvm.riscv.aes32esi" => "__builtin_riscv_aes32esi",
+    "llvm.riscv.aes32esmi" => "__builtin_riscv_aes32esmi",
+    "llvm.riscv.aes64ds" => "__builtin_riscv_aes64ds",
+    "llvm.riscv.aes64dsm" => "__builtin_riscv_aes64dsm",
+    "llvm.riscv.aes64es" => "__builtin_riscv_aes64es",
+    "llvm.riscv.aes64esm" => "__builtin_riscv_aes64esm",
+    "llvm.riscv.aes64im" => "__builtin_riscv_aes64im",
+    "llvm.riscv.aes64ks1i" => "__builtin_riscv_aes64ks1i",
+    "llvm.riscv.aes64ks2" => "__builtin_riscv_aes64ks2",
+    "llvm.riscv.sha512sig0" => "__builtin_riscv_sha512sig0",
+    "llvm.riscv.sha512sig0h" => "__builtin_riscv_sha512sig0h",
+    "llvm.riscv.sha512sig0l" => "__builtin_riscv_sha512sig0l",
+    "llvm.riscv.sha512sig1" => "__builtin_riscv_sha512sig1",
+    "llvm.riscv.sha512sig1h" => "__builtin_riscv_sha512sig1h",
+    "llvm.riscv.sha512sig1l" => "__builtin_riscv_sha512sig1l",
+    "llvm.riscv.sha512sum0" => "__builtin_riscv_sha512sum0",
+    "llvm.riscv.sha512sum0r" => "__builtin_riscv_sha512sum0r",
+    "llvm.riscv.sha512sum1" => "__builtin_riscv_sha512sum1",
+    "llvm.riscv.sha512sum1r" => "__builtin_riscv_sha512sum1r",
+    // s390
+    "llvm.s390.efpc" => "__builtin_s390_efpc",
+    "llvm.s390.etnd" => "__builtin_tx_nesting_depth",
+    "llvm.s390.lcbb" => "__builtin_s390_lcbb",
+    "llvm.s390.ppa.txassist" => "__builtin_tx_assist",
+    "llvm.s390.sfpc" => "__builtin_s390_sfpc",
+    "llvm.s390.tend" => "__builtin_tend",
+    "llvm.s390.vaccb" => "__builtin_s390_vaccb",
+    "llvm.s390.vacccq" => "__builtin_s390_vacccq",
+    "llvm.s390.vaccf" => "__builtin_s390_vaccf",
+    "llvm.s390.vaccg" => "__builtin_s390_vaccg",
+    "llvm.s390.vacch" => "__builtin_s390_vacch",
+    "llvm.s390.vaccq" => "__builtin_s390_vaccq",
+    "llvm.s390.vacq" => "__builtin_s390_vacq",
+    "llvm.s390.vaq" => "__builtin_s390_vaq",
+    "llvm.s390.vavgb" => "__builtin_s390_vavgb",
+    "llvm.s390.vavgf" => "__builtin_s390_vavgf",
+    "llvm.s390.vavgg" => "__builtin_s390_vavgg",
+    "llvm.s390.vavgh" => "__builtin_s390_vavgh",
+    "llvm.s390.vavglb" => "__builtin_s390_vavglb",
+    "llvm.s390.vavglf" => "__builtin_s390_vavglf",
+    "llvm.s390.vavglg" => "__builtin_s390_vavglg",
+    "llvm.s390.vavglh" => "__builtin_s390_vavglh",
+    "llvm.s390.vbperm" => "__builtin_s390_vbperm",
+    "llvm.s390.vcfn" => "__builtin_s390_vcfn",
+    "llvm.s390.vcksm" => "__builtin_s390_vcksm",
+    "llvm.s390.vclfnhs" => "__builtin_s390_vclfnhs",
+    "llvm.s390.vclfnls" => "__builtin_s390_vclfnls",
+    "llvm.s390.vcnf" => "__builtin_s390_vcnf",
+    "llvm.s390.vcrnfs" => "__builtin_s390_vcrnfs",
+    "llvm.s390.verimb" => "__builtin_s390_verimb",
+    "llvm.s390.verimf" => "__builtin_s390_verimf",
+    "llvm.s390.verimg" => "__builtin_s390_verimg",
+    "llvm.s390.verimh" => "__builtin_s390_verimh",
+    "llvm.s390.vfaeb" => "__builtin_s390_vfaeb",
+    "llvm.s390.vfaef" => "__builtin_s390_vfaef",
+    "llvm.s390.vfaeh" => "__builtin_s390_vfaeh",
+    "llvm.s390.vfaezb" => "__builtin_s390_vfaezb",
+    "llvm.s390.vfaezf" => "__builtin_s390_vfaezf",
+    "llvm.s390.vfaezh" => "__builtin_s390_vfaezh",
+    "llvm.s390.vfeeb" => "__builtin_s390_vfeeb",
+    "llvm.s390.vfeef" => "__builtin_s390_vfeef",
+    "llvm.s390.vfeeh" => "__builtin_s390_vfeeh",
+    "llvm.s390.vfeezb" => "__builtin_s390_vfeezb",
+    "llvm.s390.vfeezf" => "__builtin_s390_vfeezf",
+    "llvm.s390.vfeezh" => "__builtin_s390_vfeezh",
+    "llvm.s390.vfeneb" => "__builtin_s390_vfeneb",
+    "llvm.s390.vfenef" => "__builtin_s390_vfenef",
+    "llvm.s390.vfeneh" => "__builtin_s390_vfeneh",
+    "llvm.s390.vfenezb" => "__builtin_s390_vfenezb",
+    "llvm.s390.vfenezf" => "__builtin_s390_vfenezf",
+    "llvm.s390.vfenezh" => "__builtin_s390_vfenezh",
+    "llvm.s390.vgfmab" => "__builtin_s390_vgfmab",
+    "llvm.s390.vgfmaf" => "__builtin_s390_vgfmaf",
+    "llvm.s390.vgfmag" => "__builtin_s390_vgfmag",
+    "llvm.s390.vgfmah" => "__builtin_s390_vgfmah",
+    "llvm.s390.vgfmb" => "__builtin_s390_vgfmb",
+    "llvm.s390.vgfmf" => "__builtin_s390_vgfmf",
+    "llvm.s390.vgfmg" => "__builtin_s390_vgfmg",
+    "llvm.s390.vgfmh" => "__builtin_s390_vgfmh",
+    "llvm.s390.vistrb" => "__builtin_s390_vistrb",
+    "llvm.s390.vistrf" => "__builtin_s390_vistrf",
+    "llvm.s390.vistrh" => "__builtin_s390_vistrh",
+    "llvm.s390.vlbb" => "__builtin_s390_vlbb",
+    "llvm.s390.vll" => "__builtin_s390_vll",
+    "llvm.s390.vlrl" => "__builtin_s390_vlrlr",
+    "llvm.s390.vmaeb" => "__builtin_s390_vmaeb",
+    "llvm.s390.vmaef" => "__builtin_s390_vmaef",
+    "llvm.s390.vmaeh" => "__builtin_s390_vmaeh",
+    "llvm.s390.vmahb" => "__builtin_s390_vmahb",
+    "llvm.s390.vmahf" => "__builtin_s390_vmahf",
+    "llvm.s390.vmahh" => "__builtin_s390_vmahh",
+    "llvm.s390.vmaleb" => "__builtin_s390_vmaleb",
+    "llvm.s390.vmalef" => "__builtin_s390_vmalef",
+    "llvm.s390.vmaleh" => "__builtin_s390_vmaleh",
+    "llvm.s390.vmalhb" => "__builtin_s390_vmalhb",
+    "llvm.s390.vmalhf" => "__builtin_s390_vmalhf",
+    "llvm.s390.vmalhh" => "__builtin_s390_vmalhh",
+    "llvm.s390.vmalob" => "__builtin_s390_vmalob",
+    "llvm.s390.vmalof" => "__builtin_s390_vmalof",
+    "llvm.s390.vmaloh" => "__builtin_s390_vmaloh",
+    "llvm.s390.vmaob" => "__builtin_s390_vmaob",
+    "llvm.s390.vmaof" => "__builtin_s390_vmaof",
+    "llvm.s390.vmaoh" => "__builtin_s390_vmaoh",
+    "llvm.s390.vmeb" => "__builtin_s390_vmeb",
+    "llvm.s390.vmef" => "__builtin_s390_vmef",
+    "llvm.s390.vmeh" => "__builtin_s390_vmeh",
+    "llvm.s390.vmhb" => "__builtin_s390_vmhb",
+    "llvm.s390.vmhf" => "__builtin_s390_vmhf",
+    "llvm.s390.vmhh" => "__builtin_s390_vmhh",
+    "llvm.s390.vmleb" => "__builtin_s390_vmleb",
+    "llvm.s390.vmlef" => "__builtin_s390_vmlef",
+    "llvm.s390.vmleh" => "__builtin_s390_vmleh",
+    "llvm.s390.vmlhb" => "__builtin_s390_vmlhb",
+    "llvm.s390.vmlhf" => "__builtin_s390_vmlhf",
+    "llvm.s390.vmlhh" => "__builtin_s390_vmlhh",
+    "llvm.s390.vmlob" => "__builtin_s390_vmlob",
+    "llvm.s390.vmlof" => "__builtin_s390_vmlof",
+    "llvm.s390.vmloh" => "__builtin_s390_vmloh",
+    "llvm.s390.vmob" => "__builtin_s390_vmob",
+    "llvm.s390.vmof" => "__builtin_s390_vmof",
+    "llvm.s390.vmoh" => "__builtin_s390_vmoh",
+    "llvm.s390.vmslg" => "__builtin_s390_vmslg",
+    "llvm.s390.vpdi" => "__builtin_s390_vpdi",
+    "llvm.s390.vperm" => "__builtin_s390_vperm",
+    "llvm.s390.vpklsf" => "__builtin_s390_vpklsf",
+    "llvm.s390.vpklsg" => "__builtin_s390_vpklsg",
+    "llvm.s390.vpklsh" => "__builtin_s390_vpklsh",
+    "llvm.s390.vpksf" => "__builtin_s390_vpksf",
+    "llvm.s390.vpksg" => "__builtin_s390_vpksg",
+    "llvm.s390.vpksh" => "__builtin_s390_vpksh",
+    "llvm.s390.vsbcbiq" => "__builtin_s390_vsbcbiq",
+    "llvm.s390.vsbiq" => "__builtin_s390_vsbiq",
+    "llvm.s390.vscbib" => "__builtin_s390_vscbib",
+    "llvm.s390.vscbif" => "__builtin_s390_vscbif",
+    "llvm.s390.vscbig" => "__builtin_s390_vscbig",
+    "llvm.s390.vscbih" => "__builtin_s390_vscbih",
+    "llvm.s390.vscbiq" => "__builtin_s390_vscbiq",
+    "llvm.s390.vsl" => "__builtin_s390_vsl",
+    "llvm.s390.vslb" => "__builtin_s390_vslb",
+    "llvm.s390.vsld" => "__builtin_s390_vsld",
+    "llvm.s390.vsldb" => "__builtin_s390_vsldb",
+    "llvm.s390.vsq" => "__builtin_s390_vsq",
+    "llvm.s390.vsra" => "__builtin_s390_vsra",
+    "llvm.s390.vsrab" => "__builtin_s390_vsrab",
+    "llvm.s390.vsrd" => "__builtin_s390_vsrd",
+    "llvm.s390.vsrl" => "__builtin_s390_vsrl",
+    "llvm.s390.vsrlb" => "__builtin_s390_vsrlb",
+    "llvm.s390.vstl" => "__builtin_s390_vstl",
+    "llvm.s390.vstrcb" => "__builtin_s390_vstrcb",
+    "llvm.s390.vstrcf" => "__builtin_s390_vstrcf",
+    "llvm.s390.vstrch" => "__builtin_s390_vstrch",
+    "llvm.s390.vstrczb" => "__builtin_s390_vstrczb",
+    "llvm.s390.vstrczf" => "__builtin_s390_vstrczf",
+    "llvm.s390.vstrczh" => "__builtin_s390_vstrczh",
+    "llvm.s390.vstrl" => "__builtin_s390_vstrlr",
+    "llvm.s390.vsumb" => "__builtin_s390_vsumb",
+    "llvm.s390.vsumgf" => "__builtin_s390_vsumgf",
+    "llvm.s390.vsumgh" => "__builtin_s390_vsumgh",
+    "llvm.s390.vsumh" => "__builtin_s390_vsumh",
+    "llvm.s390.vsumqf" => "__builtin_s390_vsumqf",
+    "llvm.s390.vsumqg" => "__builtin_s390_vsumqg",
+    "llvm.s390.vtm" => "__builtin_s390_vtm",
+    "llvm.s390.vuphb" => "__builtin_s390_vuphb",
+    "llvm.s390.vuphf" => "__builtin_s390_vuphf",
+    "llvm.s390.vuphh" => "__builtin_s390_vuphh",
+    "llvm.s390.vuplb" => "__builtin_s390_vuplb",
+    "llvm.s390.vuplf" => "__builtin_s390_vuplf",
+    "llvm.s390.vuplhb" => "__builtin_s390_vuplhb",
+    "llvm.s390.vuplhf" => "__builtin_s390_vuplhf",
+    "llvm.s390.vuplhh" => "__builtin_s390_vuplhh",
+    "llvm.s390.vuplhw" => "__builtin_s390_vuplhw",
+    "llvm.s390.vupllb" => "__builtin_s390_vupllb",
+    "llvm.s390.vupllf" => "__builtin_s390_vupllf",
+    "llvm.s390.vupllh" => "__builtin_s390_vupllh",
+    // spv
+    "llvm.spv.create.handle" => "__builtin_hlsl_create_handle",
+    // ve
+    "llvm.ve.vl.andm.MMM" => "__builtin_ve_vl_andm_MMM",
+    "llvm.ve.vl.andm.mmm" => "__builtin_ve_vl_andm_mmm",
+    "llvm.ve.vl.eqvm.MMM" => "__builtin_ve_vl_eqvm_MMM",
+    "llvm.ve.vl.eqvm.mmm" => "__builtin_ve_vl_eqvm_mmm",
+    "llvm.ve.vl.extract.vm512l" => "__builtin_ve_vl_extract_vm512l",
+    "llvm.ve.vl.extract.vm512u" => "__builtin_ve_vl_extract_vm512u",
+    "llvm.ve.vl.fencec.s" => "__builtin_ve_vl_fencec_s",
+    "llvm.ve.vl.fencei" => "__builtin_ve_vl_fencei",
+    "llvm.ve.vl.fencem.s" => "__builtin_ve_vl_fencem_s",
+    "llvm.ve.vl.fidcr.sss" => "__builtin_ve_vl_fidcr_sss",
+    "llvm.ve.vl.insert.vm512l" => "__builtin_ve_vl_insert_vm512l",
+    "llvm.ve.vl.insert.vm512u" => "__builtin_ve_vl_insert_vm512u",
+    "llvm.ve.vl.lcr.sss" => "__builtin_ve_vl_lcr_sss",
+    "llvm.ve.vl.lsv.vvss" => "__builtin_ve_vl_lsv_vvss",
+    "llvm.ve.vl.lvm.MMss" => "__builtin_ve_vl_lvm_MMss",
+    "llvm.ve.vl.lvm.mmss" => "__builtin_ve_vl_lvm_mmss",
+    "llvm.ve.vl.lvsd.svs" => "__builtin_ve_vl_lvsd_svs",
+    "llvm.ve.vl.lvsl.svs" => "__builtin_ve_vl_lvsl_svs",
+    "llvm.ve.vl.lvss.svs" => "__builtin_ve_vl_lvss_svs",
+    "llvm.ve.vl.lzvm.sml" => "__builtin_ve_vl_lzvm_sml",
+    "llvm.ve.vl.negm.MM" => "__builtin_ve_vl_negm_MM",
+    "llvm.ve.vl.negm.mm" => "__builtin_ve_vl_negm_mm",
+    "llvm.ve.vl.nndm.MMM" => "__builtin_ve_vl_nndm_MMM",
+    "llvm.ve.vl.nndm.mmm" => "__builtin_ve_vl_nndm_mmm",
+    "llvm.ve.vl.orm.MMM" => "__builtin_ve_vl_orm_MMM",
+    "llvm.ve.vl.orm.mmm" => "__builtin_ve_vl_orm_mmm",
+    "llvm.ve.vl.pack.f32a" => "__builtin_ve_vl_pack_f32a",
+    "llvm.ve.vl.pack.f32p" => "__builtin_ve_vl_pack_f32p",
+    "llvm.ve.vl.pcvm.sml" => "__builtin_ve_vl_pcvm_sml",
+    "llvm.ve.vl.pfchv.ssl" => "__builtin_ve_vl_pfchv_ssl",
+    "llvm.ve.vl.pfchvnc.ssl" => "__builtin_ve_vl_pfchvnc_ssl",
+    "llvm.ve.vl.pvadds.vsvMvl" => "__builtin_ve_vl_pvadds_vsvMvl",
+    "llvm.ve.vl.pvadds.vsvl" => "__builtin_ve_vl_pvadds_vsvl",
+    "llvm.ve.vl.pvadds.vsvvl" => "__builtin_ve_vl_pvadds_vsvvl",
+    "llvm.ve.vl.pvadds.vvvMvl" => "__builtin_ve_vl_pvadds_vvvMvl",
+    "llvm.ve.vl.pvadds.vvvl" => "__builtin_ve_vl_pvadds_vvvl",
+    "llvm.ve.vl.pvadds.vvvvl" => "__builtin_ve_vl_pvadds_vvvvl",
+    "llvm.ve.vl.pvaddu.vsvMvl" => "__builtin_ve_vl_pvaddu_vsvMvl",
+    "llvm.ve.vl.pvaddu.vsvl" => "__builtin_ve_vl_pvaddu_vsvl",
+    "llvm.ve.vl.pvaddu.vsvvl" => "__builtin_ve_vl_pvaddu_vsvvl",
+    "llvm.ve.vl.pvaddu.vvvMvl" => "__builtin_ve_vl_pvaddu_vvvMvl",
+    "llvm.ve.vl.pvaddu.vvvl" => "__builtin_ve_vl_pvaddu_vvvl",
+    "llvm.ve.vl.pvaddu.vvvvl" => "__builtin_ve_vl_pvaddu_vvvvl",
+    "llvm.ve.vl.pvand.vsvMvl" => "__builtin_ve_vl_pvand_vsvMvl",
+    "llvm.ve.vl.pvand.vsvl" => "__builtin_ve_vl_pvand_vsvl",
+    "llvm.ve.vl.pvand.vsvvl" => "__builtin_ve_vl_pvand_vsvvl",
+    "llvm.ve.vl.pvand.vvvMvl" => "__builtin_ve_vl_pvand_vvvMvl",
+    "llvm.ve.vl.pvand.vvvl" => "__builtin_ve_vl_pvand_vvvl",
+    "llvm.ve.vl.pvand.vvvvl" => "__builtin_ve_vl_pvand_vvvvl",
+    "llvm.ve.vl.pvbrd.vsMvl" => "__builtin_ve_vl_pvbrd_vsMvl",
+    "llvm.ve.vl.pvbrd.vsl" => "__builtin_ve_vl_pvbrd_vsl",
+    "llvm.ve.vl.pvbrd.vsvl" => "__builtin_ve_vl_pvbrd_vsvl",
+    "llvm.ve.vl.pvbrv.vvMvl" => "__builtin_ve_vl_pvbrv_vvMvl",
+    "llvm.ve.vl.pvbrv.vvl" => "__builtin_ve_vl_pvbrv_vvl",
+    "llvm.ve.vl.pvbrv.vvvl" => "__builtin_ve_vl_pvbrv_vvvl",
+    "llvm.ve.vl.pvbrvlo.vvl" => "__builtin_ve_vl_pvbrvlo_vvl",
+    "llvm.ve.vl.pvbrvlo.vvmvl" => "__builtin_ve_vl_pvbrvlo_vvmvl",
+    "llvm.ve.vl.pvbrvlo.vvvl" => "__builtin_ve_vl_pvbrvlo_vvvl",
+    "llvm.ve.vl.pvbrvup.vvl" => "__builtin_ve_vl_pvbrvup_vvl",
+    "llvm.ve.vl.pvbrvup.vvmvl" => "__builtin_ve_vl_pvbrvup_vvmvl",
+    "llvm.ve.vl.pvbrvup.vvvl" => "__builtin_ve_vl_pvbrvup_vvvl",
+    "llvm.ve.vl.pvcmps.vsvMvl" => "__builtin_ve_vl_pvcmps_vsvMvl",
+    "llvm.ve.vl.pvcmps.vsvl" => "__builtin_ve_vl_pvcmps_vsvl",
+    "llvm.ve.vl.pvcmps.vsvvl" => "__builtin_ve_vl_pvcmps_vsvvl",
+    "llvm.ve.vl.pvcmps.vvvMvl" => "__builtin_ve_vl_pvcmps_vvvMvl",
+    "llvm.ve.vl.pvcmps.vvvl" => "__builtin_ve_vl_pvcmps_vvvl",
+    "llvm.ve.vl.pvcmps.vvvvl" => "__builtin_ve_vl_pvcmps_vvvvl",
+    "llvm.ve.vl.pvcmpu.vsvMvl" => "__builtin_ve_vl_pvcmpu_vsvMvl",
+    "llvm.ve.vl.pvcmpu.vsvl" => "__builtin_ve_vl_pvcmpu_vsvl",
+    "llvm.ve.vl.pvcmpu.vsvvl" => "__builtin_ve_vl_pvcmpu_vsvvl",
+    "llvm.ve.vl.pvcmpu.vvvMvl" => "__builtin_ve_vl_pvcmpu_vvvMvl",
+    "llvm.ve.vl.pvcmpu.vvvl" => "__builtin_ve_vl_pvcmpu_vvvl",
+    "llvm.ve.vl.pvcmpu.vvvvl" => "__builtin_ve_vl_pvcmpu_vvvvl",
+    "llvm.ve.vl.pvcvtsw.vvl" => "__builtin_ve_vl_pvcvtsw_vvl",
+    "llvm.ve.vl.pvcvtsw.vvvl" => "__builtin_ve_vl_pvcvtsw_vvvl",
+    "llvm.ve.vl.pvcvtws.vvMvl" => "__builtin_ve_vl_pvcvtws_vvMvl",
+    "llvm.ve.vl.pvcvtws.vvl" => "__builtin_ve_vl_pvcvtws_vvl",
+    "llvm.ve.vl.pvcvtws.vvvl" => "__builtin_ve_vl_pvcvtws_vvvl",
+    "llvm.ve.vl.pvcvtwsrz.vvMvl" => "__builtin_ve_vl_pvcvtwsrz_vvMvl",
+    "llvm.ve.vl.pvcvtwsrz.vvl" => "__builtin_ve_vl_pvcvtwsrz_vvl",
+    "llvm.ve.vl.pvcvtwsrz.vvvl" => "__builtin_ve_vl_pvcvtwsrz_vvvl",
+    "llvm.ve.vl.pveqv.vsvMvl" => "__builtin_ve_vl_pveqv_vsvMvl",
+    "llvm.ve.vl.pveqv.vsvl" => "__builtin_ve_vl_pveqv_vsvl",
+    "llvm.ve.vl.pveqv.vsvvl" => "__builtin_ve_vl_pveqv_vsvvl",
+    "llvm.ve.vl.pveqv.vvvMvl" => "__builtin_ve_vl_pveqv_vvvMvl",
+    "llvm.ve.vl.pveqv.vvvl" => "__builtin_ve_vl_pveqv_vvvl",
+    "llvm.ve.vl.pveqv.vvvvl" => "__builtin_ve_vl_pveqv_vvvvl",
+    "llvm.ve.vl.pvfadd.vsvMvl" => "__builtin_ve_vl_pvfadd_vsvMvl",
+    "llvm.ve.vl.pvfadd.vsvl" => "__builtin_ve_vl_pvfadd_vsvl",
+    "llvm.ve.vl.pvfadd.vsvvl" => "__builtin_ve_vl_pvfadd_vsvvl",
+    "llvm.ve.vl.pvfadd.vvvMvl" => "__builtin_ve_vl_pvfadd_vvvMvl",
+    "llvm.ve.vl.pvfadd.vvvl" => "__builtin_ve_vl_pvfadd_vvvl",
+    "llvm.ve.vl.pvfadd.vvvvl" => "__builtin_ve_vl_pvfadd_vvvvl",
+    "llvm.ve.vl.pvfcmp.vsvMvl" => "__builtin_ve_vl_pvfcmp_vsvMvl",
+    "llvm.ve.vl.pvfcmp.vsvl" => "__builtin_ve_vl_pvfcmp_vsvl",
+    "llvm.ve.vl.pvfcmp.vsvvl" => "__builtin_ve_vl_pvfcmp_vsvvl",
+    "llvm.ve.vl.pvfcmp.vvvMvl" => "__builtin_ve_vl_pvfcmp_vvvMvl",
+    "llvm.ve.vl.pvfcmp.vvvl" => "__builtin_ve_vl_pvfcmp_vvvl",
+    "llvm.ve.vl.pvfcmp.vvvvl" => "__builtin_ve_vl_pvfcmp_vvvvl",
+    "llvm.ve.vl.pvfmad.vsvvMvl" => "__builtin_ve_vl_pvfmad_vsvvMvl",
+    "llvm.ve.vl.pvfmad.vsvvl" => "__builtin_ve_vl_pvfmad_vsvvl",
+    "llvm.ve.vl.pvfmad.vsvvvl" => "__builtin_ve_vl_pvfmad_vsvvvl",
+    "llvm.ve.vl.pvfmad.vvsvMvl" => "__builtin_ve_vl_pvfmad_vvsvMvl",
+    "llvm.ve.vl.pvfmad.vvsvl" => "__builtin_ve_vl_pvfmad_vvsvl",
+    "llvm.ve.vl.pvfmad.vvsvvl" => "__builtin_ve_vl_pvfmad_vvsvvl",
+    "llvm.ve.vl.pvfmad.vvvvMvl" => "__builtin_ve_vl_pvfmad_vvvvMvl",
+    "llvm.ve.vl.pvfmad.vvvvl" => "__builtin_ve_vl_pvfmad_vvvvl",
+    "llvm.ve.vl.pvfmad.vvvvvl" => "__builtin_ve_vl_pvfmad_vvvvvl",
+    "llvm.ve.vl.pvfmax.vsvMvl" => "__builtin_ve_vl_pvfmax_vsvMvl",
+    "llvm.ve.vl.pvfmax.vsvl" => "__builtin_ve_vl_pvfmax_vsvl",
+    "llvm.ve.vl.pvfmax.vsvvl" => "__builtin_ve_vl_pvfmax_vsvvl",
+    "llvm.ve.vl.pvfmax.vvvMvl" => "__builtin_ve_vl_pvfmax_vvvMvl",
+    "llvm.ve.vl.pvfmax.vvvl" => "__builtin_ve_vl_pvfmax_vvvl",
+    "llvm.ve.vl.pvfmax.vvvvl" => "__builtin_ve_vl_pvfmax_vvvvl",
+    "llvm.ve.vl.pvfmin.vsvMvl" => "__builtin_ve_vl_pvfmin_vsvMvl",
+    "llvm.ve.vl.pvfmin.vsvl" => "__builtin_ve_vl_pvfmin_vsvl",
+    "llvm.ve.vl.pvfmin.vsvvl" => "__builtin_ve_vl_pvfmin_vsvvl",
+    "llvm.ve.vl.pvfmin.vvvMvl" => "__builtin_ve_vl_pvfmin_vvvMvl",
+    "llvm.ve.vl.pvfmin.vvvl" => "__builtin_ve_vl_pvfmin_vvvl",
+    "llvm.ve.vl.pvfmin.vvvvl" => "__builtin_ve_vl_pvfmin_vvvvl",
+    "llvm.ve.vl.pvfmkaf.Ml" => "__builtin_ve_vl_pvfmkaf_Ml",
+    "llvm.ve.vl.pvfmkat.Ml" => "__builtin_ve_vl_pvfmkat_Ml",
+    "llvm.ve.vl.pvfmkseq.MvMl" => "__builtin_ve_vl_pvfmkseq_MvMl",
+    "llvm.ve.vl.pvfmkseq.Mvl" => "__builtin_ve_vl_pvfmkseq_Mvl",
+    "llvm.ve.vl.pvfmkseqnan.MvMl" => "__builtin_ve_vl_pvfmkseqnan_MvMl",
+    "llvm.ve.vl.pvfmkseqnan.Mvl" => "__builtin_ve_vl_pvfmkseqnan_Mvl",
+    "llvm.ve.vl.pvfmksge.MvMl" => "__builtin_ve_vl_pvfmksge_MvMl",
+    "llvm.ve.vl.pvfmksge.Mvl" => "__builtin_ve_vl_pvfmksge_Mvl",
+    "llvm.ve.vl.pvfmksgenan.MvMl" => "__builtin_ve_vl_pvfmksgenan_MvMl",
+    "llvm.ve.vl.pvfmksgenan.Mvl" => "__builtin_ve_vl_pvfmksgenan_Mvl",
+    "llvm.ve.vl.pvfmksgt.MvMl" => "__builtin_ve_vl_pvfmksgt_MvMl",
+    "llvm.ve.vl.pvfmksgt.Mvl" => "__builtin_ve_vl_pvfmksgt_Mvl",
+    "llvm.ve.vl.pvfmksgtnan.MvMl" => "__builtin_ve_vl_pvfmksgtnan_MvMl",
+    "llvm.ve.vl.pvfmksgtnan.Mvl" => "__builtin_ve_vl_pvfmksgtnan_Mvl",
+    "llvm.ve.vl.pvfmksle.MvMl" => "__builtin_ve_vl_pvfmksle_MvMl",
+    "llvm.ve.vl.pvfmksle.Mvl" => "__builtin_ve_vl_pvfmksle_Mvl",
+    "llvm.ve.vl.pvfmkslenan.MvMl" => "__builtin_ve_vl_pvfmkslenan_MvMl",
+    "llvm.ve.vl.pvfmkslenan.Mvl" => "__builtin_ve_vl_pvfmkslenan_Mvl",
+    "llvm.ve.vl.pvfmksloeq.mvl" => "__builtin_ve_vl_pvfmksloeq_mvl",
+    "llvm.ve.vl.pvfmksloeq.mvml" => "__builtin_ve_vl_pvfmksloeq_mvml",
+    "llvm.ve.vl.pvfmksloeqnan.mvl" => "__builtin_ve_vl_pvfmksloeqnan_mvl",
+    "llvm.ve.vl.pvfmksloeqnan.mvml" => "__builtin_ve_vl_pvfmksloeqnan_mvml",
+    "llvm.ve.vl.pvfmksloge.mvl" => "__builtin_ve_vl_pvfmksloge_mvl",
+    "llvm.ve.vl.pvfmksloge.mvml" => "__builtin_ve_vl_pvfmksloge_mvml",
+    "llvm.ve.vl.pvfmkslogenan.mvl" => "__builtin_ve_vl_pvfmkslogenan_mvl",
+    "llvm.ve.vl.pvfmkslogenan.mvml" => "__builtin_ve_vl_pvfmkslogenan_mvml",
+    "llvm.ve.vl.pvfmkslogt.mvl" => "__builtin_ve_vl_pvfmkslogt_mvl",
+    "llvm.ve.vl.pvfmkslogt.mvml" => "__builtin_ve_vl_pvfmkslogt_mvml",
+    "llvm.ve.vl.pvfmkslogtnan.mvl" => "__builtin_ve_vl_pvfmkslogtnan_mvl",
+    "llvm.ve.vl.pvfmkslogtnan.mvml" => "__builtin_ve_vl_pvfmkslogtnan_mvml",
+    "llvm.ve.vl.pvfmkslole.mvl" => "__builtin_ve_vl_pvfmkslole_mvl",
+    "llvm.ve.vl.pvfmkslole.mvml" => "__builtin_ve_vl_pvfmkslole_mvml",
+    "llvm.ve.vl.pvfmkslolenan.mvl" => "__builtin_ve_vl_pvfmkslolenan_mvl",
+    "llvm.ve.vl.pvfmkslolenan.mvml" => "__builtin_ve_vl_pvfmkslolenan_mvml",
+    "llvm.ve.vl.pvfmkslolt.mvl" => "__builtin_ve_vl_pvfmkslolt_mvl",
+    "llvm.ve.vl.pvfmkslolt.mvml" => "__builtin_ve_vl_pvfmkslolt_mvml",
+    "llvm.ve.vl.pvfmksloltnan.mvl" => "__builtin_ve_vl_pvfmksloltnan_mvl",
+    "llvm.ve.vl.pvfmksloltnan.mvml" => "__builtin_ve_vl_pvfmksloltnan_mvml",
+    "llvm.ve.vl.pvfmkslonan.mvl" => "__builtin_ve_vl_pvfmkslonan_mvl",
+    "llvm.ve.vl.pvfmkslonan.mvml" => "__builtin_ve_vl_pvfmkslonan_mvml",
+    "llvm.ve.vl.pvfmkslone.mvl" => "__builtin_ve_vl_pvfmkslone_mvl",
+    "llvm.ve.vl.pvfmkslone.mvml" => "__builtin_ve_vl_pvfmkslone_mvml",
+    "llvm.ve.vl.pvfmkslonenan.mvl" => "__builtin_ve_vl_pvfmkslonenan_mvl",
+    "llvm.ve.vl.pvfmkslonenan.mvml" => "__builtin_ve_vl_pvfmkslonenan_mvml",
+    "llvm.ve.vl.pvfmkslonum.mvl" => "__builtin_ve_vl_pvfmkslonum_mvl",
+    "llvm.ve.vl.pvfmkslonum.mvml" => "__builtin_ve_vl_pvfmkslonum_mvml",
+    "llvm.ve.vl.pvfmkslt.MvMl" => "__builtin_ve_vl_pvfmkslt_MvMl",
+    "llvm.ve.vl.pvfmkslt.Mvl" => "__builtin_ve_vl_pvfmkslt_Mvl",
+    "llvm.ve.vl.pvfmksltnan.MvMl" => "__builtin_ve_vl_pvfmksltnan_MvMl",
+    "llvm.ve.vl.pvfmksltnan.Mvl" => "__builtin_ve_vl_pvfmksltnan_Mvl",
+    "llvm.ve.vl.pvfmksnan.MvMl" => "__builtin_ve_vl_pvfmksnan_MvMl",
+    "llvm.ve.vl.pvfmksnan.Mvl" => "__builtin_ve_vl_pvfmksnan_Mvl",
+    "llvm.ve.vl.pvfmksne.MvMl" => "__builtin_ve_vl_pvfmksne_MvMl",
+    "llvm.ve.vl.pvfmksne.Mvl" => "__builtin_ve_vl_pvfmksne_Mvl",
+    "llvm.ve.vl.pvfmksnenan.MvMl" => "__builtin_ve_vl_pvfmksnenan_MvMl",
+    "llvm.ve.vl.pvfmksnenan.Mvl" => "__builtin_ve_vl_pvfmksnenan_Mvl",
+    "llvm.ve.vl.pvfmksnum.MvMl" => "__builtin_ve_vl_pvfmksnum_MvMl",
+    "llvm.ve.vl.pvfmksnum.Mvl" => "__builtin_ve_vl_pvfmksnum_Mvl",
+    "llvm.ve.vl.pvfmksupeq.mvl" => "__builtin_ve_vl_pvfmksupeq_mvl",
+    "llvm.ve.vl.pvfmksupeq.mvml" => "__builtin_ve_vl_pvfmksupeq_mvml",
+    "llvm.ve.vl.pvfmksupeqnan.mvl" => "__builtin_ve_vl_pvfmksupeqnan_mvl",
+    "llvm.ve.vl.pvfmksupeqnan.mvml" => "__builtin_ve_vl_pvfmksupeqnan_mvml",
+    "llvm.ve.vl.pvfmksupge.mvl" => "__builtin_ve_vl_pvfmksupge_mvl",
+    "llvm.ve.vl.pvfmksupge.mvml" => "__builtin_ve_vl_pvfmksupge_mvml",
+    "llvm.ve.vl.pvfmksupgenan.mvl" => "__builtin_ve_vl_pvfmksupgenan_mvl",
+    "llvm.ve.vl.pvfmksupgenan.mvml" => "__builtin_ve_vl_pvfmksupgenan_mvml",
+    "llvm.ve.vl.pvfmksupgt.mvl" => "__builtin_ve_vl_pvfmksupgt_mvl",
+    "llvm.ve.vl.pvfmksupgt.mvml" => "__builtin_ve_vl_pvfmksupgt_mvml",
+    "llvm.ve.vl.pvfmksupgtnan.mvl" => "__builtin_ve_vl_pvfmksupgtnan_mvl",
+    "llvm.ve.vl.pvfmksupgtnan.mvml" => "__builtin_ve_vl_pvfmksupgtnan_mvml",
+    "llvm.ve.vl.pvfmksuple.mvl" => "__builtin_ve_vl_pvfmksuple_mvl",
+    "llvm.ve.vl.pvfmksuple.mvml" => "__builtin_ve_vl_pvfmksuple_mvml",
+    "llvm.ve.vl.pvfmksuplenan.mvl" => "__builtin_ve_vl_pvfmksuplenan_mvl",
+    "llvm.ve.vl.pvfmksuplenan.mvml" => "__builtin_ve_vl_pvfmksuplenan_mvml",
+    "llvm.ve.vl.pvfmksuplt.mvl" => "__builtin_ve_vl_pvfmksuplt_mvl",
+    "llvm.ve.vl.pvfmksuplt.mvml" => "__builtin_ve_vl_pvfmksuplt_mvml",
+    "llvm.ve.vl.pvfmksupltnan.mvl" => "__builtin_ve_vl_pvfmksupltnan_mvl",
+    "llvm.ve.vl.pvfmksupltnan.mvml" => "__builtin_ve_vl_pvfmksupltnan_mvml",
+    "llvm.ve.vl.pvfmksupnan.mvl" => "__builtin_ve_vl_pvfmksupnan_mvl",
+    "llvm.ve.vl.pvfmksupnan.mvml" => "__builtin_ve_vl_pvfmksupnan_mvml",
+    "llvm.ve.vl.pvfmksupne.mvl" => "__builtin_ve_vl_pvfmksupne_mvl",
+    "llvm.ve.vl.pvfmksupne.mvml" => "__builtin_ve_vl_pvfmksupne_mvml",
+    "llvm.ve.vl.pvfmksupnenan.mvl" => "__builtin_ve_vl_pvfmksupnenan_mvl",
+    "llvm.ve.vl.pvfmksupnenan.mvml" => "__builtin_ve_vl_pvfmksupnenan_mvml",
+    "llvm.ve.vl.pvfmksupnum.mvl" => "__builtin_ve_vl_pvfmksupnum_mvl",
+    "llvm.ve.vl.pvfmksupnum.mvml" => "__builtin_ve_vl_pvfmksupnum_mvml",
+    "llvm.ve.vl.pvfmkweq.MvMl" => "__builtin_ve_vl_pvfmkweq_MvMl",
+    "llvm.ve.vl.pvfmkweq.Mvl" => "__builtin_ve_vl_pvfmkweq_Mvl",
+    "llvm.ve.vl.pvfmkweqnan.MvMl" => "__builtin_ve_vl_pvfmkweqnan_MvMl",
+    "llvm.ve.vl.pvfmkweqnan.Mvl" => "__builtin_ve_vl_pvfmkweqnan_Mvl",
+    "llvm.ve.vl.pvfmkwge.MvMl" => "__builtin_ve_vl_pvfmkwge_MvMl",
+    "llvm.ve.vl.pvfmkwge.Mvl" => "__builtin_ve_vl_pvfmkwge_Mvl",
+    "llvm.ve.vl.pvfmkwgenan.MvMl" => "__builtin_ve_vl_pvfmkwgenan_MvMl",
+    "llvm.ve.vl.pvfmkwgenan.Mvl" => "__builtin_ve_vl_pvfmkwgenan_Mvl",
+    "llvm.ve.vl.pvfmkwgt.MvMl" => "__builtin_ve_vl_pvfmkwgt_MvMl",
+    "llvm.ve.vl.pvfmkwgt.Mvl" => "__builtin_ve_vl_pvfmkwgt_Mvl",
+    "llvm.ve.vl.pvfmkwgtnan.MvMl" => "__builtin_ve_vl_pvfmkwgtnan_MvMl",
+    "llvm.ve.vl.pvfmkwgtnan.Mvl" => "__builtin_ve_vl_pvfmkwgtnan_Mvl",
+    "llvm.ve.vl.pvfmkwle.MvMl" => "__builtin_ve_vl_pvfmkwle_MvMl",
+    "llvm.ve.vl.pvfmkwle.Mvl" => "__builtin_ve_vl_pvfmkwle_Mvl",
+    "llvm.ve.vl.pvfmkwlenan.MvMl" => "__builtin_ve_vl_pvfmkwlenan_MvMl",
+    "llvm.ve.vl.pvfmkwlenan.Mvl" => "__builtin_ve_vl_pvfmkwlenan_Mvl",
+    "llvm.ve.vl.pvfmkwloeq.mvl" => "__builtin_ve_vl_pvfmkwloeq_mvl",
+    "llvm.ve.vl.pvfmkwloeq.mvml" => "__builtin_ve_vl_pvfmkwloeq_mvml",
+    "llvm.ve.vl.pvfmkwloeqnan.mvl" => "__builtin_ve_vl_pvfmkwloeqnan_mvl",
+    "llvm.ve.vl.pvfmkwloeqnan.mvml" => "__builtin_ve_vl_pvfmkwloeqnan_mvml",
+    "llvm.ve.vl.pvfmkwloge.mvl" => "__builtin_ve_vl_pvfmkwloge_mvl",
+    "llvm.ve.vl.pvfmkwloge.mvml" => "__builtin_ve_vl_pvfmkwloge_mvml",
+    "llvm.ve.vl.pvfmkwlogenan.mvl" => "__builtin_ve_vl_pvfmkwlogenan_mvl",
+    "llvm.ve.vl.pvfmkwlogenan.mvml" => "__builtin_ve_vl_pvfmkwlogenan_mvml",
+    "llvm.ve.vl.pvfmkwlogt.mvl" => "__builtin_ve_vl_pvfmkwlogt_mvl",
+    "llvm.ve.vl.pvfmkwlogt.mvml" => "__builtin_ve_vl_pvfmkwlogt_mvml",
+    "llvm.ve.vl.pvfmkwlogtnan.mvl" => "__builtin_ve_vl_pvfmkwlogtnan_mvl",
+    "llvm.ve.vl.pvfmkwlogtnan.mvml" => "__builtin_ve_vl_pvfmkwlogtnan_mvml",
+    "llvm.ve.vl.pvfmkwlole.mvl" => "__builtin_ve_vl_pvfmkwlole_mvl",
+    "llvm.ve.vl.pvfmkwlole.mvml" => "__builtin_ve_vl_pvfmkwlole_mvml",
+    "llvm.ve.vl.pvfmkwlolenan.mvl" => "__builtin_ve_vl_pvfmkwlolenan_mvl",
+    "llvm.ve.vl.pvfmkwlolenan.mvml" => "__builtin_ve_vl_pvfmkwlolenan_mvml",
+    "llvm.ve.vl.pvfmkwlolt.mvl" => "__builtin_ve_vl_pvfmkwlolt_mvl",
+    "llvm.ve.vl.pvfmkwlolt.mvml" => "__builtin_ve_vl_pvfmkwlolt_mvml",
+    "llvm.ve.vl.pvfmkwloltnan.mvl" => "__builtin_ve_vl_pvfmkwloltnan_mvl",
+    "llvm.ve.vl.pvfmkwloltnan.mvml" => "__builtin_ve_vl_pvfmkwloltnan_mvml",
+    "llvm.ve.vl.pvfmkwlonan.mvl" => "__builtin_ve_vl_pvfmkwlonan_mvl",
+    "llvm.ve.vl.pvfmkwlonan.mvml" => "__builtin_ve_vl_pvfmkwlonan_mvml",
+    "llvm.ve.vl.pvfmkwlone.mvl" => "__builtin_ve_vl_pvfmkwlone_mvl",
+    "llvm.ve.vl.pvfmkwlone.mvml" => "__builtin_ve_vl_pvfmkwlone_mvml",
+    "llvm.ve.vl.pvfmkwlonenan.mvl" => "__builtin_ve_vl_pvfmkwlonenan_mvl",
+    "llvm.ve.vl.pvfmkwlonenan.mvml" => "__builtin_ve_vl_pvfmkwlonenan_mvml",
+    "llvm.ve.vl.pvfmkwlonum.mvl" => "__builtin_ve_vl_pvfmkwlonum_mvl",
+    "llvm.ve.vl.pvfmkwlonum.mvml" => "__builtin_ve_vl_pvfmkwlonum_mvml",
+    "llvm.ve.vl.pvfmkwlt.MvMl" => "__builtin_ve_vl_pvfmkwlt_MvMl",
+    "llvm.ve.vl.pvfmkwlt.Mvl" => "__builtin_ve_vl_pvfmkwlt_Mvl",
+    "llvm.ve.vl.pvfmkwltnan.MvMl" => "__builtin_ve_vl_pvfmkwltnan_MvMl",
+    "llvm.ve.vl.pvfmkwltnan.Mvl" => "__builtin_ve_vl_pvfmkwltnan_Mvl",
+    "llvm.ve.vl.pvfmkwnan.MvMl" => "__builtin_ve_vl_pvfmkwnan_MvMl",
+    "llvm.ve.vl.pvfmkwnan.Mvl" => "__builtin_ve_vl_pvfmkwnan_Mvl",
+    "llvm.ve.vl.pvfmkwne.MvMl" => "__builtin_ve_vl_pvfmkwne_MvMl",
+    "llvm.ve.vl.pvfmkwne.Mvl" => "__builtin_ve_vl_pvfmkwne_Mvl",
+    "llvm.ve.vl.pvfmkwnenan.MvMl" => "__builtin_ve_vl_pvfmkwnenan_MvMl",
+    "llvm.ve.vl.pvfmkwnenan.Mvl" => "__builtin_ve_vl_pvfmkwnenan_Mvl",
+    "llvm.ve.vl.pvfmkwnum.MvMl" => "__builtin_ve_vl_pvfmkwnum_MvMl",
+    "llvm.ve.vl.pvfmkwnum.Mvl" => "__builtin_ve_vl_pvfmkwnum_Mvl",
+    "llvm.ve.vl.pvfmkwupeq.mvl" => "__builtin_ve_vl_pvfmkwupeq_mvl",
+    "llvm.ve.vl.pvfmkwupeq.mvml" => "__builtin_ve_vl_pvfmkwupeq_mvml",
+    "llvm.ve.vl.pvfmkwupeqnan.mvl" => "__builtin_ve_vl_pvfmkwupeqnan_mvl",
+    "llvm.ve.vl.pvfmkwupeqnan.mvml" => "__builtin_ve_vl_pvfmkwupeqnan_mvml",
+    "llvm.ve.vl.pvfmkwupge.mvl" => "__builtin_ve_vl_pvfmkwupge_mvl",
+    "llvm.ve.vl.pvfmkwupge.mvml" => "__builtin_ve_vl_pvfmkwupge_mvml",
+    "llvm.ve.vl.pvfmkwupgenan.mvl" => "__builtin_ve_vl_pvfmkwupgenan_mvl",
+    "llvm.ve.vl.pvfmkwupgenan.mvml" => "__builtin_ve_vl_pvfmkwupgenan_mvml",
+    "llvm.ve.vl.pvfmkwupgt.mvl" => "__builtin_ve_vl_pvfmkwupgt_mvl",
+    "llvm.ve.vl.pvfmkwupgt.mvml" => "__builtin_ve_vl_pvfmkwupgt_mvml",
+    "llvm.ve.vl.pvfmkwupgtnan.mvl" => "__builtin_ve_vl_pvfmkwupgtnan_mvl",
+    "llvm.ve.vl.pvfmkwupgtnan.mvml" => "__builtin_ve_vl_pvfmkwupgtnan_mvml",
+    "llvm.ve.vl.pvfmkwuple.mvl" => "__builtin_ve_vl_pvfmkwuple_mvl",
+    "llvm.ve.vl.pvfmkwuple.mvml" => "__builtin_ve_vl_pvfmkwuple_mvml",
+    "llvm.ve.vl.pvfmkwuplenan.mvl" => "__builtin_ve_vl_pvfmkwuplenan_mvl",
+    "llvm.ve.vl.pvfmkwuplenan.mvml" => "__builtin_ve_vl_pvfmkwuplenan_mvml",
+    "llvm.ve.vl.pvfmkwuplt.mvl" => "__builtin_ve_vl_pvfmkwuplt_mvl",
+    "llvm.ve.vl.pvfmkwuplt.mvml" => "__builtin_ve_vl_pvfmkwuplt_mvml",
+    "llvm.ve.vl.pvfmkwupltnan.mvl" => "__builtin_ve_vl_pvfmkwupltnan_mvl",
+    "llvm.ve.vl.pvfmkwupltnan.mvml" => "__builtin_ve_vl_pvfmkwupltnan_mvml",
+    "llvm.ve.vl.pvfmkwupnan.mvl" => "__builtin_ve_vl_pvfmkwupnan_mvl",
+    "llvm.ve.vl.pvfmkwupnan.mvml" => "__builtin_ve_vl_pvfmkwupnan_mvml",
+    "llvm.ve.vl.pvfmkwupne.mvl" => "__builtin_ve_vl_pvfmkwupne_mvl",
+    "llvm.ve.vl.pvfmkwupne.mvml" => "__builtin_ve_vl_pvfmkwupne_mvml",
+    "llvm.ve.vl.pvfmkwupnenan.mvl" => "__builtin_ve_vl_pvfmkwupnenan_mvl",
+    "llvm.ve.vl.pvfmkwupnenan.mvml" => "__builtin_ve_vl_pvfmkwupnenan_mvml",
+    "llvm.ve.vl.pvfmkwupnum.mvl" => "__builtin_ve_vl_pvfmkwupnum_mvl",
+    "llvm.ve.vl.pvfmkwupnum.mvml" => "__builtin_ve_vl_pvfmkwupnum_mvml",
+    "llvm.ve.vl.pvfmsb.vsvvMvl" => "__builtin_ve_vl_pvfmsb_vsvvMvl",
+    "llvm.ve.vl.pvfmsb.vsvvl" => "__builtin_ve_vl_pvfmsb_vsvvl",
+    "llvm.ve.vl.pvfmsb.vsvvvl" => "__builtin_ve_vl_pvfmsb_vsvvvl",
+    "llvm.ve.vl.pvfmsb.vvsvMvl" => "__builtin_ve_vl_pvfmsb_vvsvMvl",
+    "llvm.ve.vl.pvfmsb.vvsvl" => "__builtin_ve_vl_pvfmsb_vvsvl",
+    "llvm.ve.vl.pvfmsb.vvsvvl" => "__builtin_ve_vl_pvfmsb_vvsvvl",
+    "llvm.ve.vl.pvfmsb.vvvvMvl" => "__builtin_ve_vl_pvfmsb_vvvvMvl",
+    "llvm.ve.vl.pvfmsb.vvvvl" => "__builtin_ve_vl_pvfmsb_vvvvl",
+    "llvm.ve.vl.pvfmsb.vvvvvl" => "__builtin_ve_vl_pvfmsb_vvvvvl",
+    "llvm.ve.vl.pvfmul.vsvMvl" => "__builtin_ve_vl_pvfmul_vsvMvl",
+    "llvm.ve.vl.pvfmul.vsvl" => "__builtin_ve_vl_pvfmul_vsvl",
+    "llvm.ve.vl.pvfmul.vsvvl" => "__builtin_ve_vl_pvfmul_vsvvl",
+    "llvm.ve.vl.pvfmul.vvvMvl" => "__builtin_ve_vl_pvfmul_vvvMvl",
+    "llvm.ve.vl.pvfmul.vvvl" => "__builtin_ve_vl_pvfmul_vvvl",
+    "llvm.ve.vl.pvfmul.vvvvl" => "__builtin_ve_vl_pvfmul_vvvvl",
+    "llvm.ve.vl.pvfnmad.vsvvMvl" => "__builtin_ve_vl_pvfnmad_vsvvMvl",
+    "llvm.ve.vl.pvfnmad.vsvvl" => "__builtin_ve_vl_pvfnmad_vsvvl",
+    "llvm.ve.vl.pvfnmad.vsvvvl" => "__builtin_ve_vl_pvfnmad_vsvvvl",
+    "llvm.ve.vl.pvfnmad.vvsvMvl" => "__builtin_ve_vl_pvfnmad_vvsvMvl",
+    "llvm.ve.vl.pvfnmad.vvsvl" => "__builtin_ve_vl_pvfnmad_vvsvl",
+    "llvm.ve.vl.pvfnmad.vvsvvl" => "__builtin_ve_vl_pvfnmad_vvsvvl",
+    "llvm.ve.vl.pvfnmad.vvvvMvl" => "__builtin_ve_vl_pvfnmad_vvvvMvl",
+    "llvm.ve.vl.pvfnmad.vvvvl" => "__builtin_ve_vl_pvfnmad_vvvvl",
+    "llvm.ve.vl.pvfnmad.vvvvvl" => "__builtin_ve_vl_pvfnmad_vvvvvl",
+    "llvm.ve.vl.pvfnmsb.vsvvMvl" => "__builtin_ve_vl_pvfnmsb_vsvvMvl",
+    "llvm.ve.vl.pvfnmsb.vsvvl" => "__builtin_ve_vl_pvfnmsb_vsvvl",
+    "llvm.ve.vl.pvfnmsb.vsvvvl" => "__builtin_ve_vl_pvfnmsb_vsvvvl",
+    "llvm.ve.vl.pvfnmsb.vvsvMvl" => "__builtin_ve_vl_pvfnmsb_vvsvMvl",
+    "llvm.ve.vl.pvfnmsb.vvsvl" => "__builtin_ve_vl_pvfnmsb_vvsvl",
+    "llvm.ve.vl.pvfnmsb.vvsvvl" => "__builtin_ve_vl_pvfnmsb_vvsvvl",
+    "llvm.ve.vl.pvfnmsb.vvvvMvl" => "__builtin_ve_vl_pvfnmsb_vvvvMvl",
+    "llvm.ve.vl.pvfnmsb.vvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvl",
+    "llvm.ve.vl.pvfnmsb.vvvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvvl",
+    "llvm.ve.vl.pvfsub.vsvMvl" => "__builtin_ve_vl_pvfsub_vsvMvl",
+    "llvm.ve.vl.pvfsub.vsvl" => "__builtin_ve_vl_pvfsub_vsvl",
+    "llvm.ve.vl.pvfsub.vsvvl" => "__builtin_ve_vl_pvfsub_vsvvl",
+    "llvm.ve.vl.pvfsub.vvvMvl" => "__builtin_ve_vl_pvfsub_vvvMvl",
+    "llvm.ve.vl.pvfsub.vvvl" => "__builtin_ve_vl_pvfsub_vvvl",
+    "llvm.ve.vl.pvfsub.vvvvl" => "__builtin_ve_vl_pvfsub_vvvvl",
+    "llvm.ve.vl.pvldz.vvMvl" => "__builtin_ve_vl_pvldz_vvMvl",
+    "llvm.ve.vl.pvldz.vvl" => "__builtin_ve_vl_pvldz_vvl",
+    "llvm.ve.vl.pvldz.vvvl" => "__builtin_ve_vl_pvldz_vvvl",
+    "llvm.ve.vl.pvldzlo.vvl" => "__builtin_ve_vl_pvldzlo_vvl",
+    "llvm.ve.vl.pvldzlo.vvmvl" => "__builtin_ve_vl_pvldzlo_vvmvl",
+    "llvm.ve.vl.pvldzlo.vvvl" => "__builtin_ve_vl_pvldzlo_vvvl",
+    "llvm.ve.vl.pvldzup.vvl" => "__builtin_ve_vl_pvldzup_vvl",
+    "llvm.ve.vl.pvldzup.vvmvl" => "__builtin_ve_vl_pvldzup_vvmvl",
+    "llvm.ve.vl.pvldzup.vvvl" => "__builtin_ve_vl_pvldzup_vvvl",
+    "llvm.ve.vl.pvmaxs.vsvMvl" => "__builtin_ve_vl_pvmaxs_vsvMvl",
+    "llvm.ve.vl.pvmaxs.vsvl" => "__builtin_ve_vl_pvmaxs_vsvl",
+    "llvm.ve.vl.pvmaxs.vsvvl" => "__builtin_ve_vl_pvmaxs_vsvvl",
+    "llvm.ve.vl.pvmaxs.vvvMvl" => "__builtin_ve_vl_pvmaxs_vvvMvl",
+    "llvm.ve.vl.pvmaxs.vvvl" => "__builtin_ve_vl_pvmaxs_vvvl",
+    "llvm.ve.vl.pvmaxs.vvvvl" => "__builtin_ve_vl_pvmaxs_vvvvl",
+    "llvm.ve.vl.pvmins.vsvMvl" => "__builtin_ve_vl_pvmins_vsvMvl",
+    "llvm.ve.vl.pvmins.vsvl" => "__builtin_ve_vl_pvmins_vsvl",
+    "llvm.ve.vl.pvmins.vsvvl" => "__builtin_ve_vl_pvmins_vsvvl",
+    "llvm.ve.vl.pvmins.vvvMvl" => "__builtin_ve_vl_pvmins_vvvMvl",
+    "llvm.ve.vl.pvmins.vvvl" => "__builtin_ve_vl_pvmins_vvvl",
+    "llvm.ve.vl.pvmins.vvvvl" => "__builtin_ve_vl_pvmins_vvvvl",
+    "llvm.ve.vl.pvor.vsvMvl" => "__builtin_ve_vl_pvor_vsvMvl",
+    "llvm.ve.vl.pvor.vsvl" => "__builtin_ve_vl_pvor_vsvl",
+    "llvm.ve.vl.pvor.vsvvl" => "__builtin_ve_vl_pvor_vsvvl",
+    "llvm.ve.vl.pvor.vvvMvl" => "__builtin_ve_vl_pvor_vvvMvl",
+    "llvm.ve.vl.pvor.vvvl" => "__builtin_ve_vl_pvor_vvvl",
+    "llvm.ve.vl.pvor.vvvvl" => "__builtin_ve_vl_pvor_vvvvl",
+    "llvm.ve.vl.pvpcnt.vvMvl" => "__builtin_ve_vl_pvpcnt_vvMvl",
+    "llvm.ve.vl.pvpcnt.vvl" => "__builtin_ve_vl_pvpcnt_vvl",
+    "llvm.ve.vl.pvpcnt.vvvl" => "__builtin_ve_vl_pvpcnt_vvvl",
+    "llvm.ve.vl.pvpcntlo.vvl" => "__builtin_ve_vl_pvpcntlo_vvl",
+    "llvm.ve.vl.pvpcntlo.vvmvl" => "__builtin_ve_vl_pvpcntlo_vvmvl",
+    "llvm.ve.vl.pvpcntlo.vvvl" => "__builtin_ve_vl_pvpcntlo_vvvl",
+    "llvm.ve.vl.pvpcntup.vvl" => "__builtin_ve_vl_pvpcntup_vvl",
+    "llvm.ve.vl.pvpcntup.vvmvl" => "__builtin_ve_vl_pvpcntup_vvmvl",
+    "llvm.ve.vl.pvpcntup.vvvl" => "__builtin_ve_vl_pvpcntup_vvvl",
+    "llvm.ve.vl.pvrcp.vvl" => "__builtin_ve_vl_pvrcp_vvl",
+    "llvm.ve.vl.pvrcp.vvvl" => "__builtin_ve_vl_pvrcp_vvvl",
+    "llvm.ve.vl.pvrsqrt.vvl" => "__builtin_ve_vl_pvrsqrt_vvl",
+    "llvm.ve.vl.pvrsqrt.vvvl" => "__builtin_ve_vl_pvrsqrt_vvvl",
+    "llvm.ve.vl.pvrsqrtnex.vvl" => "__builtin_ve_vl_pvrsqrtnex_vvl",
+    "llvm.ve.vl.pvrsqrtnex.vvvl" => "__builtin_ve_vl_pvrsqrtnex_vvvl",
+    "llvm.ve.vl.pvseq.vl" => "__builtin_ve_vl_pvseq_vl",
+    "llvm.ve.vl.pvseq.vvl" => "__builtin_ve_vl_pvseq_vvl",
+    "llvm.ve.vl.pvseqlo.vl" => "__builtin_ve_vl_pvseqlo_vl",
+    "llvm.ve.vl.pvseqlo.vvl" => "__builtin_ve_vl_pvseqlo_vvl",
+    "llvm.ve.vl.pvsequp.vl" => "__builtin_ve_vl_pvsequp_vl",
+    "llvm.ve.vl.pvsequp.vvl" => "__builtin_ve_vl_pvsequp_vvl",
+    "llvm.ve.vl.pvsla.vvsMvl" => "__builtin_ve_vl_pvsla_vvsMvl",
+    "llvm.ve.vl.pvsla.vvsl" => "__builtin_ve_vl_pvsla_vvsl",
+    "llvm.ve.vl.pvsla.vvsvl" => "__builtin_ve_vl_pvsla_vvsvl",
+    "llvm.ve.vl.pvsla.vvvMvl" => "__builtin_ve_vl_pvsla_vvvMvl",
+    "llvm.ve.vl.pvsla.vvvl" => "__builtin_ve_vl_pvsla_vvvl",
+    "llvm.ve.vl.pvsla.vvvvl" => "__builtin_ve_vl_pvsla_vvvvl",
+    "llvm.ve.vl.pvsll.vvsMvl" => "__builtin_ve_vl_pvsll_vvsMvl",
+    "llvm.ve.vl.pvsll.vvsl" => "__builtin_ve_vl_pvsll_vvsl",
+    "llvm.ve.vl.pvsll.vvsvl" => "__builtin_ve_vl_pvsll_vvsvl",
+    "llvm.ve.vl.pvsll.vvvMvl" => "__builtin_ve_vl_pvsll_vvvMvl",
+    "llvm.ve.vl.pvsll.vvvl" => "__builtin_ve_vl_pvsll_vvvl",
+    "llvm.ve.vl.pvsll.vvvvl" => "__builtin_ve_vl_pvsll_vvvvl",
+    "llvm.ve.vl.pvsra.vvsMvl" => "__builtin_ve_vl_pvsra_vvsMvl",
+    "llvm.ve.vl.pvsra.vvsl" => "__builtin_ve_vl_pvsra_vvsl",
+    "llvm.ve.vl.pvsra.vvsvl" => "__builtin_ve_vl_pvsra_vvsvl",
+    "llvm.ve.vl.pvsra.vvvMvl" => "__builtin_ve_vl_pvsra_vvvMvl",
+    "llvm.ve.vl.pvsra.vvvl" => "__builtin_ve_vl_pvsra_vvvl",
+    "llvm.ve.vl.pvsra.vvvvl" => "__builtin_ve_vl_pvsra_vvvvl",
+    "llvm.ve.vl.pvsrl.vvsMvl" => "__builtin_ve_vl_pvsrl_vvsMvl",
+    "llvm.ve.vl.pvsrl.vvsl" => "__builtin_ve_vl_pvsrl_vvsl",
+    "llvm.ve.vl.pvsrl.vvsvl" => "__builtin_ve_vl_pvsrl_vvsvl",
+    "llvm.ve.vl.pvsrl.vvvMvl" => "__builtin_ve_vl_pvsrl_vvvMvl",
+    "llvm.ve.vl.pvsrl.vvvl" => "__builtin_ve_vl_pvsrl_vvvl",
+    "llvm.ve.vl.pvsrl.vvvvl" => "__builtin_ve_vl_pvsrl_vvvvl",
+    "llvm.ve.vl.pvsubs.vsvMvl" => "__builtin_ve_vl_pvsubs_vsvMvl",
+    "llvm.ve.vl.pvsubs.vsvl" => "__builtin_ve_vl_pvsubs_vsvl",
+    "llvm.ve.vl.pvsubs.vsvvl" => "__builtin_ve_vl_pvsubs_vsvvl",
+    "llvm.ve.vl.pvsubs.vvvMvl" => "__builtin_ve_vl_pvsubs_vvvMvl",
+    "llvm.ve.vl.pvsubs.vvvl" => "__builtin_ve_vl_pvsubs_vvvl",
+    "llvm.ve.vl.pvsubs.vvvvl" => "__builtin_ve_vl_pvsubs_vvvvl",
+    "llvm.ve.vl.pvsubu.vsvMvl" => "__builtin_ve_vl_pvsubu_vsvMvl",
+    "llvm.ve.vl.pvsubu.vsvl" => "__builtin_ve_vl_pvsubu_vsvl",
+    "llvm.ve.vl.pvsubu.vsvvl" => "__builtin_ve_vl_pvsubu_vsvvl",
+    "llvm.ve.vl.pvsubu.vvvMvl" => "__builtin_ve_vl_pvsubu_vvvMvl",
+    "llvm.ve.vl.pvsubu.vvvl" => "__builtin_ve_vl_pvsubu_vvvl",
+    "llvm.ve.vl.pvsubu.vvvvl" => "__builtin_ve_vl_pvsubu_vvvvl",
+    "llvm.ve.vl.pvxor.vsvMvl" => "__builtin_ve_vl_pvxor_vsvMvl",
+    "llvm.ve.vl.pvxor.vsvl" => "__builtin_ve_vl_pvxor_vsvl",
+    "llvm.ve.vl.pvxor.vsvvl" => "__builtin_ve_vl_pvxor_vsvvl",
+    "llvm.ve.vl.pvxor.vvvMvl" => "__builtin_ve_vl_pvxor_vvvMvl",
+    "llvm.ve.vl.pvxor.vvvl" => "__builtin_ve_vl_pvxor_vvvl",
+    "llvm.ve.vl.pvxor.vvvvl" => "__builtin_ve_vl_pvxor_vvvvl",
+    "llvm.ve.vl.scr.sss" => "__builtin_ve_vl_scr_sss",
+    "llvm.ve.vl.svm.sMs" => "__builtin_ve_vl_svm_sMs",
+    "llvm.ve.vl.svm.sms" => "__builtin_ve_vl_svm_sms",
+    "llvm.ve.vl.svob" => "__builtin_ve_vl_svob",
+    "llvm.ve.vl.tovm.sml" => "__builtin_ve_vl_tovm_sml",
+    "llvm.ve.vl.tscr.ssss" => "__builtin_ve_vl_tscr_ssss",
+    "llvm.ve.vl.vaddsl.vsvl" => "__builtin_ve_vl_vaddsl_vsvl",
+    "llvm.ve.vl.vaddsl.vsvmvl" => "__builtin_ve_vl_vaddsl_vsvmvl",
+    "llvm.ve.vl.vaddsl.vsvvl" => "__builtin_ve_vl_vaddsl_vsvvl",
+    "llvm.ve.vl.vaddsl.vvvl" => "__builtin_ve_vl_vaddsl_vvvl",
+    "llvm.ve.vl.vaddsl.vvvmvl" => "__builtin_ve_vl_vaddsl_vvvmvl",
+    "llvm.ve.vl.vaddsl.vvvvl" => "__builtin_ve_vl_vaddsl_vvvvl",
+    "llvm.ve.vl.vaddswsx.vsvl" => "__builtin_ve_vl_vaddswsx_vsvl",
+    "llvm.ve.vl.vaddswsx.vsvmvl" => "__builtin_ve_vl_vaddswsx_vsvmvl",
+    "llvm.ve.vl.vaddswsx.vsvvl" => "__builtin_ve_vl_vaddswsx_vsvvl",
+    "llvm.ve.vl.vaddswsx.vvvl" => "__builtin_ve_vl_vaddswsx_vvvl",
+    "llvm.ve.vl.vaddswsx.vvvmvl" => "__builtin_ve_vl_vaddswsx_vvvmvl",
+    "llvm.ve.vl.vaddswsx.vvvvl" => "__builtin_ve_vl_vaddswsx_vvvvl",
+    "llvm.ve.vl.vaddswzx.vsvl" => "__builtin_ve_vl_vaddswzx_vsvl",
+    "llvm.ve.vl.vaddswzx.vsvmvl" => "__builtin_ve_vl_vaddswzx_vsvmvl",
+    "llvm.ve.vl.vaddswzx.vsvvl" => "__builtin_ve_vl_vaddswzx_vsvvl",
+    "llvm.ve.vl.vaddswzx.vvvl" => "__builtin_ve_vl_vaddswzx_vvvl",
+    "llvm.ve.vl.vaddswzx.vvvmvl" => "__builtin_ve_vl_vaddswzx_vvvmvl",
+    "llvm.ve.vl.vaddswzx.vvvvl" => "__builtin_ve_vl_vaddswzx_vvvvl",
+    "llvm.ve.vl.vaddul.vsvl" => "__builtin_ve_vl_vaddul_vsvl",
+    "llvm.ve.vl.vaddul.vsvmvl" => "__builtin_ve_vl_vaddul_vsvmvl",
+    "llvm.ve.vl.vaddul.vsvvl" => "__builtin_ve_vl_vaddul_vsvvl",
+    "llvm.ve.vl.vaddul.vvvl" => "__builtin_ve_vl_vaddul_vvvl",
+    "llvm.ve.vl.vaddul.vvvmvl" => "__builtin_ve_vl_vaddul_vvvmvl",
+    "llvm.ve.vl.vaddul.vvvvl" => "__builtin_ve_vl_vaddul_vvvvl",
+    "llvm.ve.vl.vadduw.vsvl" => "__builtin_ve_vl_vadduw_vsvl",
+    "llvm.ve.vl.vadduw.vsvmvl" => "__builtin_ve_vl_vadduw_vsvmvl",
+    "llvm.ve.vl.vadduw.vsvvl" => "__builtin_ve_vl_vadduw_vsvvl",
+    "llvm.ve.vl.vadduw.vvvl" => "__builtin_ve_vl_vadduw_vvvl",
+    "llvm.ve.vl.vadduw.vvvmvl" => "__builtin_ve_vl_vadduw_vvvmvl",
+    "llvm.ve.vl.vadduw.vvvvl" => "__builtin_ve_vl_vadduw_vvvvl",
+    "llvm.ve.vl.vand.vsvl" => "__builtin_ve_vl_vand_vsvl",
+    "llvm.ve.vl.vand.vsvmvl" => "__builtin_ve_vl_vand_vsvmvl",
+    "llvm.ve.vl.vand.vsvvl" => "__builtin_ve_vl_vand_vsvvl",
+    "llvm.ve.vl.vand.vvvl" => "__builtin_ve_vl_vand_vvvl",
+    "llvm.ve.vl.vand.vvvmvl" => "__builtin_ve_vl_vand_vvvmvl",
+    "llvm.ve.vl.vand.vvvvl" => "__builtin_ve_vl_vand_vvvvl",
+    "llvm.ve.vl.vbrdd.vsl" => "__builtin_ve_vl_vbrdd_vsl",
+    "llvm.ve.vl.vbrdd.vsmvl" => "__builtin_ve_vl_vbrdd_vsmvl",
+    "llvm.ve.vl.vbrdd.vsvl" => "__builtin_ve_vl_vbrdd_vsvl",
+    "llvm.ve.vl.vbrdl.vsl" => "__builtin_ve_vl_vbrdl_vsl",
+    "llvm.ve.vl.vbrdl.vsmvl" => "__builtin_ve_vl_vbrdl_vsmvl",
+    "llvm.ve.vl.vbrdl.vsvl" => "__builtin_ve_vl_vbrdl_vsvl",
+    "llvm.ve.vl.vbrds.vsl" => "__builtin_ve_vl_vbrds_vsl",
+    "llvm.ve.vl.vbrds.vsmvl" => "__builtin_ve_vl_vbrds_vsmvl",
+    "llvm.ve.vl.vbrds.vsvl" => "__builtin_ve_vl_vbrds_vsvl",
+    "llvm.ve.vl.vbrdw.vsl" => "__builtin_ve_vl_vbrdw_vsl",
+    "llvm.ve.vl.vbrdw.vsmvl" => "__builtin_ve_vl_vbrdw_vsmvl",
+    "llvm.ve.vl.vbrdw.vsvl" => "__builtin_ve_vl_vbrdw_vsvl",
+    "llvm.ve.vl.vbrv.vvl" => "__builtin_ve_vl_vbrv_vvl",
+    "llvm.ve.vl.vbrv.vvmvl" => "__builtin_ve_vl_vbrv_vvmvl",
+    "llvm.ve.vl.vbrv.vvvl" => "__builtin_ve_vl_vbrv_vvvl",
+    "llvm.ve.vl.vcmpsl.vsvl" => "__builtin_ve_vl_vcmpsl_vsvl",
+    "llvm.ve.vl.vcmpsl.vsvmvl" => "__builtin_ve_vl_vcmpsl_vsvmvl",
+    "llvm.ve.vl.vcmpsl.vsvvl" => "__builtin_ve_vl_vcmpsl_vsvvl",
+    "llvm.ve.vl.vcmpsl.vvvl" => "__builtin_ve_vl_vcmpsl_vvvl",
+    "llvm.ve.vl.vcmpsl.vvvmvl" => "__builtin_ve_vl_vcmpsl_vvvmvl",
+    "llvm.ve.vl.vcmpsl.vvvvl" => "__builtin_ve_vl_vcmpsl_vvvvl",
+    "llvm.ve.vl.vcmpswsx.vsvl" => "__builtin_ve_vl_vcmpswsx_vsvl",
+    "llvm.ve.vl.vcmpswsx.vsvmvl" => "__builtin_ve_vl_vcmpswsx_vsvmvl",
+    "llvm.ve.vl.vcmpswsx.vsvvl" => "__builtin_ve_vl_vcmpswsx_vsvvl",
+    "llvm.ve.vl.vcmpswsx.vvvl" => "__builtin_ve_vl_vcmpswsx_vvvl",
+    "llvm.ve.vl.vcmpswsx.vvvmvl" => "__builtin_ve_vl_vcmpswsx_vvvmvl",
+    "llvm.ve.vl.vcmpswsx.vvvvl" => "__builtin_ve_vl_vcmpswsx_vvvvl",
+    "llvm.ve.vl.vcmpswzx.vsvl" => "__builtin_ve_vl_vcmpswzx_vsvl",
+    "llvm.ve.vl.vcmpswzx.vsvmvl" => "__builtin_ve_vl_vcmpswzx_vsvmvl",
+    "llvm.ve.vl.vcmpswzx.vsvvl" => "__builtin_ve_vl_vcmpswzx_vsvvl",
+    "llvm.ve.vl.vcmpswzx.vvvl" => "__builtin_ve_vl_vcmpswzx_vvvl",
+    "llvm.ve.vl.vcmpswzx.vvvmvl" => "__builtin_ve_vl_vcmpswzx_vvvmvl",
+    "llvm.ve.vl.vcmpswzx.vvvvl" => "__builtin_ve_vl_vcmpswzx_vvvvl",
+    "llvm.ve.vl.vcmpul.vsvl" => "__builtin_ve_vl_vcmpul_vsvl",
+    "llvm.ve.vl.vcmpul.vsvmvl" => "__builtin_ve_vl_vcmpul_vsvmvl",
+    "llvm.ve.vl.vcmpul.vsvvl" => "__builtin_ve_vl_vcmpul_vsvvl",
+    "llvm.ve.vl.vcmpul.vvvl" => "__builtin_ve_vl_vcmpul_vvvl",
+    "llvm.ve.vl.vcmpul.vvvmvl" => "__builtin_ve_vl_vcmpul_vvvmvl",
+    "llvm.ve.vl.vcmpul.vvvvl" => "__builtin_ve_vl_vcmpul_vvvvl",
+    "llvm.ve.vl.vcmpuw.vsvl" => "__builtin_ve_vl_vcmpuw_vsvl",
+    "llvm.ve.vl.vcmpuw.vsvmvl" => "__builtin_ve_vl_vcmpuw_vsvmvl",
+    "llvm.ve.vl.vcmpuw.vsvvl" => "__builtin_ve_vl_vcmpuw_vsvvl",
+    "llvm.ve.vl.vcmpuw.vvvl" => "__builtin_ve_vl_vcmpuw_vvvl",
+    "llvm.ve.vl.vcmpuw.vvvmvl" => "__builtin_ve_vl_vcmpuw_vvvmvl",
+    "llvm.ve.vl.vcmpuw.vvvvl" => "__builtin_ve_vl_vcmpuw_vvvvl",
+    "llvm.ve.vl.vcp.vvmvl" => "__builtin_ve_vl_vcp_vvmvl",
+    "llvm.ve.vl.vcvtdl.vvl" => "__builtin_ve_vl_vcvtdl_vvl",
+    "llvm.ve.vl.vcvtdl.vvvl" => "__builtin_ve_vl_vcvtdl_vvvl",
+    "llvm.ve.vl.vcvtds.vvl" => "__builtin_ve_vl_vcvtds_vvl",
+    "llvm.ve.vl.vcvtds.vvvl" => "__builtin_ve_vl_vcvtds_vvvl",
+    "llvm.ve.vl.vcvtdw.vvl" => "__builtin_ve_vl_vcvtdw_vvl",
+    "llvm.ve.vl.vcvtdw.vvvl" => "__builtin_ve_vl_vcvtdw_vvvl",
+    "llvm.ve.vl.vcvtld.vvl" => "__builtin_ve_vl_vcvtld_vvl",
+    "llvm.ve.vl.vcvtld.vvmvl" => "__builtin_ve_vl_vcvtld_vvmvl",
+    "llvm.ve.vl.vcvtld.vvvl" => "__builtin_ve_vl_vcvtld_vvvl",
+    "llvm.ve.vl.vcvtldrz.vvl" => "__builtin_ve_vl_vcvtldrz_vvl",
+    "llvm.ve.vl.vcvtldrz.vvmvl" => "__builtin_ve_vl_vcvtldrz_vvmvl",
+    "llvm.ve.vl.vcvtldrz.vvvl" => "__builtin_ve_vl_vcvtldrz_vvvl",
+    "llvm.ve.vl.vcvtsd.vvl" => "__builtin_ve_vl_vcvtsd_vvl",
+    "llvm.ve.vl.vcvtsd.vvvl" => "__builtin_ve_vl_vcvtsd_vvvl",
+    "llvm.ve.vl.vcvtsw.vvl" => "__builtin_ve_vl_vcvtsw_vvl",
+    "llvm.ve.vl.vcvtsw.vvvl" => "__builtin_ve_vl_vcvtsw_vvvl",
+    "llvm.ve.vl.vcvtwdsx.vvl" => "__builtin_ve_vl_vcvtwdsx_vvl",
+    "llvm.ve.vl.vcvtwdsx.vvmvl" => "__builtin_ve_vl_vcvtwdsx_vvmvl",
+    "llvm.ve.vl.vcvtwdsx.vvvl" => "__builtin_ve_vl_vcvtwdsx_vvvl",
+    "llvm.ve.vl.vcvtwdsxrz.vvl" => "__builtin_ve_vl_vcvtwdsxrz_vvl",
+    "llvm.ve.vl.vcvtwdsxrz.vvmvl" => "__builtin_ve_vl_vcvtwdsxrz_vvmvl",
+    "llvm.ve.vl.vcvtwdsxrz.vvvl" => "__builtin_ve_vl_vcvtwdsxrz_vvvl",
+    "llvm.ve.vl.vcvtwdzx.vvl" => "__builtin_ve_vl_vcvtwdzx_vvl",
+    "llvm.ve.vl.vcvtwdzx.vvmvl" => "__builtin_ve_vl_vcvtwdzx_vvmvl",
+    "llvm.ve.vl.vcvtwdzx.vvvl" => "__builtin_ve_vl_vcvtwdzx_vvvl",
+    "llvm.ve.vl.vcvtwdzxrz.vvl" => "__builtin_ve_vl_vcvtwdzxrz_vvl",
+    "llvm.ve.vl.vcvtwdzxrz.vvmvl" => "__builtin_ve_vl_vcvtwdzxrz_vvmvl",
+    "llvm.ve.vl.vcvtwdzxrz.vvvl" => "__builtin_ve_vl_vcvtwdzxrz_vvvl",
+    "llvm.ve.vl.vcvtwssx.vvl" => "__builtin_ve_vl_vcvtwssx_vvl",
+    "llvm.ve.vl.vcvtwssx.vvmvl" => "__builtin_ve_vl_vcvtwssx_vvmvl",
+    "llvm.ve.vl.vcvtwssx.vvvl" => "__builtin_ve_vl_vcvtwssx_vvvl",
+    "llvm.ve.vl.vcvtwssxrz.vvl" => "__builtin_ve_vl_vcvtwssxrz_vvl",
+    "llvm.ve.vl.vcvtwssxrz.vvmvl" => "__builtin_ve_vl_vcvtwssxrz_vvmvl",
+    "llvm.ve.vl.vcvtwssxrz.vvvl" => "__builtin_ve_vl_vcvtwssxrz_vvvl",
+    "llvm.ve.vl.vcvtwszx.vvl" => "__builtin_ve_vl_vcvtwszx_vvl",
+    "llvm.ve.vl.vcvtwszx.vvmvl" => "__builtin_ve_vl_vcvtwszx_vvmvl",
+    "llvm.ve.vl.vcvtwszx.vvvl" => "__builtin_ve_vl_vcvtwszx_vvvl",
+    "llvm.ve.vl.vcvtwszxrz.vvl" => "__builtin_ve_vl_vcvtwszxrz_vvl",
+    "llvm.ve.vl.vcvtwszxrz.vvmvl" => "__builtin_ve_vl_vcvtwszxrz_vvmvl",
+    "llvm.ve.vl.vcvtwszxrz.vvvl" => "__builtin_ve_vl_vcvtwszxrz_vvvl",
+    "llvm.ve.vl.vdivsl.vsvl" => "__builtin_ve_vl_vdivsl_vsvl",
+    "llvm.ve.vl.vdivsl.vsvmvl" => "__builtin_ve_vl_vdivsl_vsvmvl",
+    "llvm.ve.vl.vdivsl.vsvvl" => "__builtin_ve_vl_vdivsl_vsvvl",
+    "llvm.ve.vl.vdivsl.vvsl" => "__builtin_ve_vl_vdivsl_vvsl",
+    "llvm.ve.vl.vdivsl.vvsmvl" => "__builtin_ve_vl_vdivsl_vvsmvl",
+    "llvm.ve.vl.vdivsl.vvsvl" => "__builtin_ve_vl_vdivsl_vvsvl",
+    "llvm.ve.vl.vdivsl.vvvl" => "__builtin_ve_vl_vdivsl_vvvl",
+    "llvm.ve.vl.vdivsl.vvvmvl" => "__builtin_ve_vl_vdivsl_vvvmvl",
+    "llvm.ve.vl.vdivsl.vvvvl" => "__builtin_ve_vl_vdivsl_vvvvl",
+    "llvm.ve.vl.vdivswsx.vsvl" => "__builtin_ve_vl_vdivswsx_vsvl",
+    "llvm.ve.vl.vdivswsx.vsvmvl" => "__builtin_ve_vl_vdivswsx_vsvmvl",
+    "llvm.ve.vl.vdivswsx.vsvvl" => "__builtin_ve_vl_vdivswsx_vsvvl",
+    "llvm.ve.vl.vdivswsx.vvsl" => "__builtin_ve_vl_vdivswsx_vvsl",
+    "llvm.ve.vl.vdivswsx.vvsmvl" => "__builtin_ve_vl_vdivswsx_vvsmvl",
+    "llvm.ve.vl.vdivswsx.vvsvl" => "__builtin_ve_vl_vdivswsx_vvsvl",
+    "llvm.ve.vl.vdivswsx.vvvl" => "__builtin_ve_vl_vdivswsx_vvvl",
+    "llvm.ve.vl.vdivswsx.vvvmvl" => "__builtin_ve_vl_vdivswsx_vvvmvl",
+    "llvm.ve.vl.vdivswsx.vvvvl" => "__builtin_ve_vl_vdivswsx_vvvvl",
+    "llvm.ve.vl.vdivswzx.vsvl" => "__builtin_ve_vl_vdivswzx_vsvl",
+    "llvm.ve.vl.vdivswzx.vsvmvl" => "__builtin_ve_vl_vdivswzx_vsvmvl",
+    "llvm.ve.vl.vdivswzx.vsvvl" => "__builtin_ve_vl_vdivswzx_vsvvl",
+    "llvm.ve.vl.vdivswzx.vvsl" => "__builtin_ve_vl_vdivswzx_vvsl",
+    "llvm.ve.vl.vdivswzx.vvsmvl" => "__builtin_ve_vl_vdivswzx_vvsmvl",
+    "llvm.ve.vl.vdivswzx.vvsvl" => "__builtin_ve_vl_vdivswzx_vvsvl",
+    "llvm.ve.vl.vdivswzx.vvvl" => "__builtin_ve_vl_vdivswzx_vvvl",
+    "llvm.ve.vl.vdivswzx.vvvmvl" => "__builtin_ve_vl_vdivswzx_vvvmvl",
+    "llvm.ve.vl.vdivswzx.vvvvl" => "__builtin_ve_vl_vdivswzx_vvvvl",
+    "llvm.ve.vl.vdivul.vsvl" => "__builtin_ve_vl_vdivul_vsvl",
+    "llvm.ve.vl.vdivul.vsvmvl" => "__builtin_ve_vl_vdivul_vsvmvl",
+    "llvm.ve.vl.vdivul.vsvvl" => "__builtin_ve_vl_vdivul_vsvvl",
+    "llvm.ve.vl.vdivul.vvsl" => "__builtin_ve_vl_vdivul_vvsl",
+    "llvm.ve.vl.vdivul.vvsmvl" => "__builtin_ve_vl_vdivul_vvsmvl",
+    "llvm.ve.vl.vdivul.vvsvl" => "__builtin_ve_vl_vdivul_vvsvl",
+    "llvm.ve.vl.vdivul.vvvl" => "__builtin_ve_vl_vdivul_vvvl",
+    "llvm.ve.vl.vdivul.vvvmvl" => "__builtin_ve_vl_vdivul_vvvmvl",
+    "llvm.ve.vl.vdivul.vvvvl" => "__builtin_ve_vl_vdivul_vvvvl",
+    "llvm.ve.vl.vdivuw.vsvl" => "__builtin_ve_vl_vdivuw_vsvl",
+    "llvm.ve.vl.vdivuw.vsvmvl" => "__builtin_ve_vl_vdivuw_vsvmvl",
+    "llvm.ve.vl.vdivuw.vsvvl" => "__builtin_ve_vl_vdivuw_vsvvl",
+    "llvm.ve.vl.vdivuw.vvsl" => "__builtin_ve_vl_vdivuw_vvsl",
+    "llvm.ve.vl.vdivuw.vvsmvl" => "__builtin_ve_vl_vdivuw_vvsmvl",
+    "llvm.ve.vl.vdivuw.vvsvl" => "__builtin_ve_vl_vdivuw_vvsvl",
+    "llvm.ve.vl.vdivuw.vvvl" => "__builtin_ve_vl_vdivuw_vvvl",
+    "llvm.ve.vl.vdivuw.vvvmvl" => "__builtin_ve_vl_vdivuw_vvvmvl",
+    "llvm.ve.vl.vdivuw.vvvvl" => "__builtin_ve_vl_vdivuw_vvvvl",
+    "llvm.ve.vl.veqv.vsvl" => "__builtin_ve_vl_veqv_vsvl",
+    "llvm.ve.vl.veqv.vsvmvl" => "__builtin_ve_vl_veqv_vsvmvl",
+    "llvm.ve.vl.veqv.vsvvl" => "__builtin_ve_vl_veqv_vsvvl",
+    "llvm.ve.vl.veqv.vvvl" => "__builtin_ve_vl_veqv_vvvl",
+    "llvm.ve.vl.veqv.vvvmvl" => "__builtin_ve_vl_veqv_vvvmvl",
+    "llvm.ve.vl.veqv.vvvvl" => "__builtin_ve_vl_veqv_vvvvl",
+    "llvm.ve.vl.vex.vvmvl" => "__builtin_ve_vl_vex_vvmvl",
+    "llvm.ve.vl.vfaddd.vsvl" => "__builtin_ve_vl_vfaddd_vsvl",
+    "llvm.ve.vl.vfaddd.vsvmvl" => "__builtin_ve_vl_vfaddd_vsvmvl",
+    "llvm.ve.vl.vfaddd.vsvvl" => "__builtin_ve_vl_vfaddd_vsvvl",
+    "llvm.ve.vl.vfaddd.vvvl" => "__builtin_ve_vl_vfaddd_vvvl",
+    "llvm.ve.vl.vfaddd.vvvmvl" => "__builtin_ve_vl_vfaddd_vvvmvl",
+    "llvm.ve.vl.vfaddd.vvvvl" => "__builtin_ve_vl_vfaddd_vvvvl",
+    "llvm.ve.vl.vfadds.vsvl" => "__builtin_ve_vl_vfadds_vsvl",
+    "llvm.ve.vl.vfadds.vsvmvl" => "__builtin_ve_vl_vfadds_vsvmvl",
+    "llvm.ve.vl.vfadds.vsvvl" => "__builtin_ve_vl_vfadds_vsvvl",
+    "llvm.ve.vl.vfadds.vvvl" => "__builtin_ve_vl_vfadds_vvvl",
+    "llvm.ve.vl.vfadds.vvvmvl" => "__builtin_ve_vl_vfadds_vvvmvl",
+    "llvm.ve.vl.vfadds.vvvvl" => "__builtin_ve_vl_vfadds_vvvvl",
+    "llvm.ve.vl.vfcmpd.vsvl" => "__builtin_ve_vl_vfcmpd_vsvl",
+    "llvm.ve.vl.vfcmpd.vsvmvl" => "__builtin_ve_vl_vfcmpd_vsvmvl",
+    "llvm.ve.vl.vfcmpd.vsvvl" => "__builtin_ve_vl_vfcmpd_vsvvl",
+    "llvm.ve.vl.vfcmpd.vvvl" => "__builtin_ve_vl_vfcmpd_vvvl",
+    "llvm.ve.vl.vfcmpd.vvvmvl" => "__builtin_ve_vl_vfcmpd_vvvmvl",
+    "llvm.ve.vl.vfcmpd.vvvvl" => "__builtin_ve_vl_vfcmpd_vvvvl",
+    "llvm.ve.vl.vfcmps.vsvl" => "__builtin_ve_vl_vfcmps_vsvl",
+    "llvm.ve.vl.vfcmps.vsvmvl" => "__builtin_ve_vl_vfcmps_vsvmvl",
+    "llvm.ve.vl.vfcmps.vsvvl" => "__builtin_ve_vl_vfcmps_vsvvl",
+    "llvm.ve.vl.vfcmps.vvvl" => "__builtin_ve_vl_vfcmps_vvvl",
+    "llvm.ve.vl.vfcmps.vvvmvl" => "__builtin_ve_vl_vfcmps_vvvmvl",
+    "llvm.ve.vl.vfcmps.vvvvl" => "__builtin_ve_vl_vfcmps_vvvvl",
+    "llvm.ve.vl.vfdivd.vsvl" => "__builtin_ve_vl_vfdivd_vsvl",
+    "llvm.ve.vl.vfdivd.vsvmvl" => "__builtin_ve_vl_vfdivd_vsvmvl",
+    "llvm.ve.vl.vfdivd.vsvvl" => "__builtin_ve_vl_vfdivd_vsvvl",
+    "llvm.ve.vl.vfdivd.vvvl" => "__builtin_ve_vl_vfdivd_vvvl",
+    "llvm.ve.vl.vfdivd.vvvmvl" => "__builtin_ve_vl_vfdivd_vvvmvl",
+    "llvm.ve.vl.vfdivd.vvvvl" => "__builtin_ve_vl_vfdivd_vvvvl",
+    "llvm.ve.vl.vfdivs.vsvl" => "__builtin_ve_vl_vfdivs_vsvl",
+    "llvm.ve.vl.vfdivs.vsvmvl" => "__builtin_ve_vl_vfdivs_vsvmvl",
+    "llvm.ve.vl.vfdivs.vsvvl" => "__builtin_ve_vl_vfdivs_vsvvl",
+    "llvm.ve.vl.vfdivs.vvvl" => "__builtin_ve_vl_vfdivs_vvvl",
+    "llvm.ve.vl.vfdivs.vvvmvl" => "__builtin_ve_vl_vfdivs_vvvmvl",
+    "llvm.ve.vl.vfdivs.vvvvl" => "__builtin_ve_vl_vfdivs_vvvvl",
+    "llvm.ve.vl.vfmadd.vsvvl" => "__builtin_ve_vl_vfmadd_vsvvl",
+    "llvm.ve.vl.vfmadd.vsvvmvl" => "__builtin_ve_vl_vfmadd_vsvvmvl",
+    "llvm.ve.vl.vfmadd.vsvvvl" => "__builtin_ve_vl_vfmadd_vsvvvl",
+    "llvm.ve.vl.vfmadd.vvsvl" => "__builtin_ve_vl_vfmadd_vvsvl",
+    "llvm.ve.vl.vfmadd.vvsvmvl" => "__builtin_ve_vl_vfmadd_vvsvmvl",
+    "llvm.ve.vl.vfmadd.vvsvvl" => "__builtin_ve_vl_vfmadd_vvsvvl",
+    "llvm.ve.vl.vfmadd.vvvvl" => "__builtin_ve_vl_vfmadd_vvvvl",
+    "llvm.ve.vl.vfmadd.vvvvmvl" => "__builtin_ve_vl_vfmadd_vvvvmvl",
+    "llvm.ve.vl.vfmadd.vvvvvl" => "__builtin_ve_vl_vfmadd_vvvvvl",
+    "llvm.ve.vl.vfmads.vsvvl" => "__builtin_ve_vl_vfmads_vsvvl",
+    "llvm.ve.vl.vfmads.vsvvmvl" => "__builtin_ve_vl_vfmads_vsvvmvl",
+    "llvm.ve.vl.vfmads.vsvvvl" => "__builtin_ve_vl_vfmads_vsvvvl",
+    "llvm.ve.vl.vfmads.vvsvl" => "__builtin_ve_vl_vfmads_vvsvl",
+    "llvm.ve.vl.vfmads.vvsvmvl" => "__builtin_ve_vl_vfmads_vvsvmvl",
+    "llvm.ve.vl.vfmads.vvsvvl" => "__builtin_ve_vl_vfmads_vvsvvl",
+    "llvm.ve.vl.vfmads.vvvvl" => "__builtin_ve_vl_vfmads_vvvvl",
+    "llvm.ve.vl.vfmads.vvvvmvl" => "__builtin_ve_vl_vfmads_vvvvmvl",
+    "llvm.ve.vl.vfmads.vvvvvl" => "__builtin_ve_vl_vfmads_vvvvvl",
+    "llvm.ve.vl.vfmaxd.vsvl" => "__builtin_ve_vl_vfmaxd_vsvl",
+    "llvm.ve.vl.vfmaxd.vsvmvl" => "__builtin_ve_vl_vfmaxd_vsvmvl",
+    "llvm.ve.vl.vfmaxd.vsvvl" => "__builtin_ve_vl_vfmaxd_vsvvl",
+    "llvm.ve.vl.vfmaxd.vvvl" => "__builtin_ve_vl_vfmaxd_vvvl",
+    "llvm.ve.vl.vfmaxd.vvvmvl" => "__builtin_ve_vl_vfmaxd_vvvmvl",
+    "llvm.ve.vl.vfmaxd.vvvvl" => "__builtin_ve_vl_vfmaxd_vvvvl",
+    "llvm.ve.vl.vfmaxs.vsvl" => "__builtin_ve_vl_vfmaxs_vsvl",
+    "llvm.ve.vl.vfmaxs.vsvmvl" => "__builtin_ve_vl_vfmaxs_vsvmvl",
+    "llvm.ve.vl.vfmaxs.vsvvl" => "__builtin_ve_vl_vfmaxs_vsvvl",
+    "llvm.ve.vl.vfmaxs.vvvl" => "__builtin_ve_vl_vfmaxs_vvvl",
+    "llvm.ve.vl.vfmaxs.vvvmvl" => "__builtin_ve_vl_vfmaxs_vvvmvl",
+    "llvm.ve.vl.vfmaxs.vvvvl" => "__builtin_ve_vl_vfmaxs_vvvvl",
+    "llvm.ve.vl.vfmind.vsvl" => "__builtin_ve_vl_vfmind_vsvl",
+    "llvm.ve.vl.vfmind.vsvmvl" => "__builtin_ve_vl_vfmind_vsvmvl",
+    "llvm.ve.vl.vfmind.vsvvl" => "__builtin_ve_vl_vfmind_vsvvl",
+    "llvm.ve.vl.vfmind.vvvl" => "__builtin_ve_vl_vfmind_vvvl",
+    "llvm.ve.vl.vfmind.vvvmvl" => "__builtin_ve_vl_vfmind_vvvmvl",
+    "llvm.ve.vl.vfmind.vvvvl" => "__builtin_ve_vl_vfmind_vvvvl",
+    "llvm.ve.vl.vfmins.vsvl" => "__builtin_ve_vl_vfmins_vsvl",
+    "llvm.ve.vl.vfmins.vsvmvl" => "__builtin_ve_vl_vfmins_vsvmvl",
+    "llvm.ve.vl.vfmins.vsvvl" => "__builtin_ve_vl_vfmins_vsvvl",
+    "llvm.ve.vl.vfmins.vvvl" => "__builtin_ve_vl_vfmins_vvvl",
+    "llvm.ve.vl.vfmins.vvvmvl" => "__builtin_ve_vl_vfmins_vvvmvl",
+    "llvm.ve.vl.vfmins.vvvvl" => "__builtin_ve_vl_vfmins_vvvvl",
+    "llvm.ve.vl.vfmkdeq.mvl" => "__builtin_ve_vl_vfmkdeq_mvl",
+    "llvm.ve.vl.vfmkdeq.mvml" => "__builtin_ve_vl_vfmkdeq_mvml",
+    "llvm.ve.vl.vfmkdeqnan.mvl" => "__builtin_ve_vl_vfmkdeqnan_mvl",
+    "llvm.ve.vl.vfmkdeqnan.mvml" => "__builtin_ve_vl_vfmkdeqnan_mvml",
+    "llvm.ve.vl.vfmkdge.mvl" => "__builtin_ve_vl_vfmkdge_mvl",
+    "llvm.ve.vl.vfmkdge.mvml" => "__builtin_ve_vl_vfmkdge_mvml",
+    "llvm.ve.vl.vfmkdgenan.mvl" => "__builtin_ve_vl_vfmkdgenan_mvl",
+    "llvm.ve.vl.vfmkdgenan.mvml" => "__builtin_ve_vl_vfmkdgenan_mvml",
+    "llvm.ve.vl.vfmkdgt.mvl" => "__builtin_ve_vl_vfmkdgt_mvl",
+    "llvm.ve.vl.vfmkdgt.mvml" => "__builtin_ve_vl_vfmkdgt_mvml",
+    "llvm.ve.vl.vfmkdgtnan.mvl" => "__builtin_ve_vl_vfmkdgtnan_mvl",
+    "llvm.ve.vl.vfmkdgtnan.mvml" => "__builtin_ve_vl_vfmkdgtnan_mvml",
+    "llvm.ve.vl.vfmkdle.mvl" => "__builtin_ve_vl_vfmkdle_mvl",
+    "llvm.ve.vl.vfmkdle.mvml" => "__builtin_ve_vl_vfmkdle_mvml",
+    "llvm.ve.vl.vfmkdlenan.mvl" => "__builtin_ve_vl_vfmkdlenan_mvl",
+    "llvm.ve.vl.vfmkdlenan.mvml" => "__builtin_ve_vl_vfmkdlenan_mvml",
+    "llvm.ve.vl.vfmkdlt.mvl" => "__builtin_ve_vl_vfmkdlt_mvl",
+    "llvm.ve.vl.vfmkdlt.mvml" => "__builtin_ve_vl_vfmkdlt_mvml",
+    "llvm.ve.vl.vfmkdltnan.mvl" => "__builtin_ve_vl_vfmkdltnan_mvl",
+    "llvm.ve.vl.vfmkdltnan.mvml" => "__builtin_ve_vl_vfmkdltnan_mvml",
+    "llvm.ve.vl.vfmkdnan.mvl" => "__builtin_ve_vl_vfmkdnan_mvl",
+    "llvm.ve.vl.vfmkdnan.mvml" => "__builtin_ve_vl_vfmkdnan_mvml",
+    "llvm.ve.vl.vfmkdne.mvl" => "__builtin_ve_vl_vfmkdne_mvl",
+    "llvm.ve.vl.vfmkdne.mvml" => "__builtin_ve_vl_vfmkdne_mvml",
+    "llvm.ve.vl.vfmkdnenan.mvl" => "__builtin_ve_vl_vfmkdnenan_mvl",
+    "llvm.ve.vl.vfmkdnenan.mvml" => "__builtin_ve_vl_vfmkdnenan_mvml",
+    "llvm.ve.vl.vfmkdnum.mvl" => "__builtin_ve_vl_vfmkdnum_mvl",
+    "llvm.ve.vl.vfmkdnum.mvml" => "__builtin_ve_vl_vfmkdnum_mvml",
+    "llvm.ve.vl.vfmklaf.ml" => "__builtin_ve_vl_vfmklaf_ml",
+    "llvm.ve.vl.vfmklat.ml" => "__builtin_ve_vl_vfmklat_ml",
+    "llvm.ve.vl.vfmkleq.mvl" => "__builtin_ve_vl_vfmkleq_mvl",
+    "llvm.ve.vl.vfmkleq.mvml" => "__builtin_ve_vl_vfmkleq_mvml",
+    "llvm.ve.vl.vfmkleqnan.mvl" => "__builtin_ve_vl_vfmkleqnan_mvl",
+    "llvm.ve.vl.vfmkleqnan.mvml" => "__builtin_ve_vl_vfmkleqnan_mvml",
+    "llvm.ve.vl.vfmklge.mvl" => "__builtin_ve_vl_vfmklge_mvl",
+    "llvm.ve.vl.vfmklge.mvml" => "__builtin_ve_vl_vfmklge_mvml",
+    "llvm.ve.vl.vfmklgenan.mvl" => "__builtin_ve_vl_vfmklgenan_mvl",
+    "llvm.ve.vl.vfmklgenan.mvml" => "__builtin_ve_vl_vfmklgenan_mvml",
+    "llvm.ve.vl.vfmklgt.mvl" => "__builtin_ve_vl_vfmklgt_mvl",
+    "llvm.ve.vl.vfmklgt.mvml" => "__builtin_ve_vl_vfmklgt_mvml",
+    "llvm.ve.vl.vfmklgtnan.mvl" => "__builtin_ve_vl_vfmklgtnan_mvl",
+    "llvm.ve.vl.vfmklgtnan.mvml" => "__builtin_ve_vl_vfmklgtnan_mvml",
+    "llvm.ve.vl.vfmklle.mvl" => "__builtin_ve_vl_vfmklle_mvl",
+    "llvm.ve.vl.vfmklle.mvml" => "__builtin_ve_vl_vfmklle_mvml",
+    "llvm.ve.vl.vfmkllenan.mvl" => "__builtin_ve_vl_vfmkllenan_mvl",
+    "llvm.ve.vl.vfmkllenan.mvml" => "__builtin_ve_vl_vfmkllenan_mvml",
+    "llvm.ve.vl.vfmkllt.mvl" => "__builtin_ve_vl_vfmkllt_mvl",
+    "llvm.ve.vl.vfmkllt.mvml" => "__builtin_ve_vl_vfmkllt_mvml",
+    "llvm.ve.vl.vfmklltnan.mvl" => "__builtin_ve_vl_vfmklltnan_mvl",
+    "llvm.ve.vl.vfmklltnan.mvml" => "__builtin_ve_vl_vfmklltnan_mvml",
+    "llvm.ve.vl.vfmklnan.mvl" => "__builtin_ve_vl_vfmklnan_mvl",
+    "llvm.ve.vl.vfmklnan.mvml" => "__builtin_ve_vl_vfmklnan_mvml",
+    "llvm.ve.vl.vfmklne.mvl" => "__builtin_ve_vl_vfmklne_mvl",
+    "llvm.ve.vl.vfmklne.mvml" => "__builtin_ve_vl_vfmklne_mvml",
+    "llvm.ve.vl.vfmklnenan.mvl" => "__builtin_ve_vl_vfmklnenan_mvl",
+    "llvm.ve.vl.vfmklnenan.mvml" => "__builtin_ve_vl_vfmklnenan_mvml",
+    "llvm.ve.vl.vfmklnum.mvl" => "__builtin_ve_vl_vfmklnum_mvl",
+    "llvm.ve.vl.vfmklnum.mvml" => "__builtin_ve_vl_vfmklnum_mvml",
+    "llvm.ve.vl.vfmkseq.mvl" => "__builtin_ve_vl_vfmkseq_mvl",
+    "llvm.ve.vl.vfmkseq.mvml" => "__builtin_ve_vl_vfmkseq_mvml",
+    "llvm.ve.vl.vfmkseqnan.mvl" => "__builtin_ve_vl_vfmkseqnan_mvl",
+    "llvm.ve.vl.vfmkseqnan.mvml" => "__builtin_ve_vl_vfmkseqnan_mvml",
+    "llvm.ve.vl.vfmksge.mvl" => "__builtin_ve_vl_vfmksge_mvl",
+    "llvm.ve.vl.vfmksge.mvml" => "__builtin_ve_vl_vfmksge_mvml",
+    "llvm.ve.vl.vfmksgenan.mvl" => "__builtin_ve_vl_vfmksgenan_mvl",
+    "llvm.ve.vl.vfmksgenan.mvml" => "__builtin_ve_vl_vfmksgenan_mvml",
+    "llvm.ve.vl.vfmksgt.mvl" => "__builtin_ve_vl_vfmksgt_mvl",
+    "llvm.ve.vl.vfmksgt.mvml" => "__builtin_ve_vl_vfmksgt_mvml",
+    "llvm.ve.vl.vfmksgtnan.mvl" => "__builtin_ve_vl_vfmksgtnan_mvl",
+    "llvm.ve.vl.vfmksgtnan.mvml" => "__builtin_ve_vl_vfmksgtnan_mvml",
+    "llvm.ve.vl.vfmksle.mvl" => "__builtin_ve_vl_vfmksle_mvl",
+    "llvm.ve.vl.vfmksle.mvml" => "__builtin_ve_vl_vfmksle_mvml",
+    "llvm.ve.vl.vfmkslenan.mvl" => "__builtin_ve_vl_vfmkslenan_mvl",
+    "llvm.ve.vl.vfmkslenan.mvml" => "__builtin_ve_vl_vfmkslenan_mvml",
+    "llvm.ve.vl.vfmkslt.mvl" => "__builtin_ve_vl_vfmkslt_mvl",
+    "llvm.ve.vl.vfmkslt.mvml" => "__builtin_ve_vl_vfmkslt_mvml",
+    "llvm.ve.vl.vfmksltnan.mvl" => "__builtin_ve_vl_vfmksltnan_mvl",
+    "llvm.ve.vl.vfmksltnan.mvml" => "__builtin_ve_vl_vfmksltnan_mvml",
+    "llvm.ve.vl.vfmksnan.mvl" => "__builtin_ve_vl_vfmksnan_mvl",
+    "llvm.ve.vl.vfmksnan.mvml" => "__builtin_ve_vl_vfmksnan_mvml",
+    "llvm.ve.vl.vfmksne.mvl" => "__builtin_ve_vl_vfmksne_mvl",
+    "llvm.ve.vl.vfmksne.mvml" => "__builtin_ve_vl_vfmksne_mvml",
+    "llvm.ve.vl.vfmksnenan.mvl" => "__builtin_ve_vl_vfmksnenan_mvl",
+    "llvm.ve.vl.vfmksnenan.mvml" => "__builtin_ve_vl_vfmksnenan_mvml",
+    "llvm.ve.vl.vfmksnum.mvl" => "__builtin_ve_vl_vfmksnum_mvl",
+    "llvm.ve.vl.vfmksnum.mvml" => "__builtin_ve_vl_vfmksnum_mvml",
+    "llvm.ve.vl.vfmkweq.mvl" => "__builtin_ve_vl_vfmkweq_mvl",
+    "llvm.ve.vl.vfmkweq.mvml" => "__builtin_ve_vl_vfmkweq_mvml",
+    "llvm.ve.vl.vfmkweqnan.mvl" => "__builtin_ve_vl_vfmkweqnan_mvl",
+    "llvm.ve.vl.vfmkweqnan.mvml" => "__builtin_ve_vl_vfmkweqnan_mvml",
+    "llvm.ve.vl.vfmkwge.mvl" => "__builtin_ve_vl_vfmkwge_mvl",
+    "llvm.ve.vl.vfmkwge.mvml" => "__builtin_ve_vl_vfmkwge_mvml",
+    "llvm.ve.vl.vfmkwgenan.mvl" => "__builtin_ve_vl_vfmkwgenan_mvl",
+    "llvm.ve.vl.vfmkwgenan.mvml" => "__builtin_ve_vl_vfmkwgenan_mvml",
+    "llvm.ve.vl.vfmkwgt.mvl" => "__builtin_ve_vl_vfmkwgt_mvl",
+    "llvm.ve.vl.vfmkwgt.mvml" => "__builtin_ve_vl_vfmkwgt_mvml",
+    "llvm.ve.vl.vfmkwgtnan.mvl" => "__builtin_ve_vl_vfmkwgtnan_mvl",
+    "llvm.ve.vl.vfmkwgtnan.mvml" => "__builtin_ve_vl_vfmkwgtnan_mvml",
+    "llvm.ve.vl.vfmkwle.mvl" => "__builtin_ve_vl_vfmkwle_mvl",
+    "llvm.ve.vl.vfmkwle.mvml" => "__builtin_ve_vl_vfmkwle_mvml",
+    "llvm.ve.vl.vfmkwlenan.mvl" => "__builtin_ve_vl_vfmkwlenan_mvl",
+    "llvm.ve.vl.vfmkwlenan.mvml" => "__builtin_ve_vl_vfmkwlenan_mvml",
+    "llvm.ve.vl.vfmkwlt.mvl" => "__builtin_ve_vl_vfmkwlt_mvl",
+    "llvm.ve.vl.vfmkwlt.mvml" => "__builtin_ve_vl_vfmkwlt_mvml",
+    "llvm.ve.vl.vfmkwltnan.mvl" => "__builtin_ve_vl_vfmkwltnan_mvl",
+    "llvm.ve.vl.vfmkwltnan.mvml" => "__builtin_ve_vl_vfmkwltnan_mvml",
+    "llvm.ve.vl.vfmkwnan.mvl" => "__builtin_ve_vl_vfmkwnan_mvl",
+    "llvm.ve.vl.vfmkwnan.mvml" => "__builtin_ve_vl_vfmkwnan_mvml",
+    "llvm.ve.vl.vfmkwne.mvl" => "__builtin_ve_vl_vfmkwne_mvl",
+    "llvm.ve.vl.vfmkwne.mvml" => "__builtin_ve_vl_vfmkwne_mvml",
+    "llvm.ve.vl.vfmkwnenan.mvl" => "__builtin_ve_vl_vfmkwnenan_mvl",
+    "llvm.ve.vl.vfmkwnenan.mvml" => "__builtin_ve_vl_vfmkwnenan_mvml",
+    "llvm.ve.vl.vfmkwnum.mvl" => "__builtin_ve_vl_vfmkwnum_mvl",
+    "llvm.ve.vl.vfmkwnum.mvml" => "__builtin_ve_vl_vfmkwnum_mvml",
+    "llvm.ve.vl.vfmsbd.vsvvl" => "__builtin_ve_vl_vfmsbd_vsvvl",
+    "llvm.ve.vl.vfmsbd.vsvvmvl" => "__builtin_ve_vl_vfmsbd_vsvvmvl",
+    "llvm.ve.vl.vfmsbd.vsvvvl" => "__builtin_ve_vl_vfmsbd_vsvvvl",
+    "llvm.ve.vl.vfmsbd.vvsvl" => "__builtin_ve_vl_vfmsbd_vvsvl",
+    "llvm.ve.vl.vfmsbd.vvsvmvl" => "__builtin_ve_vl_vfmsbd_vvsvmvl",
+    "llvm.ve.vl.vfmsbd.vvsvvl" => "__builtin_ve_vl_vfmsbd_vvsvvl",
+    "llvm.ve.vl.vfmsbd.vvvvl" => "__builtin_ve_vl_vfmsbd_vvvvl",
+    "llvm.ve.vl.vfmsbd.vvvvmvl" => "__builtin_ve_vl_vfmsbd_vvvvmvl",
+    "llvm.ve.vl.vfmsbd.vvvvvl" => "__builtin_ve_vl_vfmsbd_vvvvvl",
+    "llvm.ve.vl.vfmsbs.vsvvl" => "__builtin_ve_vl_vfmsbs_vsvvl",
+    "llvm.ve.vl.vfmsbs.vsvvmvl" => "__builtin_ve_vl_vfmsbs_vsvvmvl",
+    "llvm.ve.vl.vfmsbs.vsvvvl" => "__builtin_ve_vl_vfmsbs_vsvvvl",
+    "llvm.ve.vl.vfmsbs.vvsvl" => "__builtin_ve_vl_vfmsbs_vvsvl",
+    "llvm.ve.vl.vfmsbs.vvsvmvl" => "__builtin_ve_vl_vfmsbs_vvsvmvl",
+    "llvm.ve.vl.vfmsbs.vvsvvl" => "__builtin_ve_vl_vfmsbs_vvsvvl",
+    "llvm.ve.vl.vfmsbs.vvvvl" => "__builtin_ve_vl_vfmsbs_vvvvl",
+    "llvm.ve.vl.vfmsbs.vvvvmvl" => "__builtin_ve_vl_vfmsbs_vvvvmvl",
+    "llvm.ve.vl.vfmsbs.vvvvvl" => "__builtin_ve_vl_vfmsbs_vvvvvl",
+    "llvm.ve.vl.vfmuld.vsvl" => "__builtin_ve_vl_vfmuld_vsvl",
+    "llvm.ve.vl.vfmuld.vsvmvl" => "__builtin_ve_vl_vfmuld_vsvmvl",
+    "llvm.ve.vl.vfmuld.vsvvl" => "__builtin_ve_vl_vfmuld_vsvvl",
+    "llvm.ve.vl.vfmuld.vvvl" => "__builtin_ve_vl_vfmuld_vvvl",
+    "llvm.ve.vl.vfmuld.vvvmvl" => "__builtin_ve_vl_vfmuld_vvvmvl",
+    "llvm.ve.vl.vfmuld.vvvvl" => "__builtin_ve_vl_vfmuld_vvvvl",
+    "llvm.ve.vl.vfmuls.vsvl" => "__builtin_ve_vl_vfmuls_vsvl",
+    "llvm.ve.vl.vfmuls.vsvmvl" => "__builtin_ve_vl_vfmuls_vsvmvl",
+    "llvm.ve.vl.vfmuls.vsvvl" => "__builtin_ve_vl_vfmuls_vsvvl",
+    "llvm.ve.vl.vfmuls.vvvl" => "__builtin_ve_vl_vfmuls_vvvl",
+    "llvm.ve.vl.vfmuls.vvvmvl" => "__builtin_ve_vl_vfmuls_vvvmvl",
+    "llvm.ve.vl.vfmuls.vvvvl" => "__builtin_ve_vl_vfmuls_vvvvl",
+    "llvm.ve.vl.vfnmadd.vsvvl" => "__builtin_ve_vl_vfnmadd_vsvvl",
+    "llvm.ve.vl.vfnmadd.vsvvmvl" => "__builtin_ve_vl_vfnmadd_vsvvmvl",
+    "llvm.ve.vl.vfnmadd.vsvvvl" => "__builtin_ve_vl_vfnmadd_vsvvvl",
+    "llvm.ve.vl.vfnmadd.vvsvl" => "__builtin_ve_vl_vfnmadd_vvsvl",
+    "llvm.ve.vl.vfnmadd.vvsvmvl" => "__builtin_ve_vl_vfnmadd_vvsvmvl",
+    "llvm.ve.vl.vfnmadd.vvsvvl" => "__builtin_ve_vl_vfnmadd_vvsvvl",
+    "llvm.ve.vl.vfnmadd.vvvvl" => "__builtin_ve_vl_vfnmadd_vvvvl",
+    "llvm.ve.vl.vfnmadd.vvvvmvl" => "__builtin_ve_vl_vfnmadd_vvvvmvl",
+    "llvm.ve.vl.vfnmadd.vvvvvl" => "__builtin_ve_vl_vfnmadd_vvvvvl",
+    "llvm.ve.vl.vfnmads.vsvvl" => "__builtin_ve_vl_vfnmads_vsvvl",
+    "llvm.ve.vl.vfnmads.vsvvmvl" => "__builtin_ve_vl_vfnmads_vsvvmvl",
+    "llvm.ve.vl.vfnmads.vsvvvl" => "__builtin_ve_vl_vfnmads_vsvvvl",
+    "llvm.ve.vl.vfnmads.vvsvl" => "__builtin_ve_vl_vfnmads_vvsvl",
+    "llvm.ve.vl.vfnmads.vvsvmvl" => "__builtin_ve_vl_vfnmads_vvsvmvl",
+    "llvm.ve.vl.vfnmads.vvsvvl" => "__builtin_ve_vl_vfnmads_vvsvvl",
+    "llvm.ve.vl.vfnmads.vvvvl" => "__builtin_ve_vl_vfnmads_vvvvl",
+    "llvm.ve.vl.vfnmads.vvvvmvl" => "__builtin_ve_vl_vfnmads_vvvvmvl",
+    "llvm.ve.vl.vfnmads.vvvvvl" => "__builtin_ve_vl_vfnmads_vvvvvl",
+    "llvm.ve.vl.vfnmsbd.vsvvl" => "__builtin_ve_vl_vfnmsbd_vsvvl",
+    "llvm.ve.vl.vfnmsbd.vsvvmvl" => "__builtin_ve_vl_vfnmsbd_vsvvmvl",
+    "llvm.ve.vl.vfnmsbd.vsvvvl" => "__builtin_ve_vl_vfnmsbd_vsvvvl",
+    "llvm.ve.vl.vfnmsbd.vvsvl" => "__builtin_ve_vl_vfnmsbd_vvsvl",
+    "llvm.ve.vl.vfnmsbd.vvsvmvl" => "__builtin_ve_vl_vfnmsbd_vvsvmvl",
+    "llvm.ve.vl.vfnmsbd.vvsvvl" => "__builtin_ve_vl_vfnmsbd_vvsvvl",
+    "llvm.ve.vl.vfnmsbd.vvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvl",
+    "llvm.ve.vl.vfnmsbd.vvvvmvl" => "__builtin_ve_vl_vfnmsbd_vvvvmvl",
+    "llvm.ve.vl.vfnmsbd.vvvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvvl",
+    "llvm.ve.vl.vfnmsbs.vsvvl" => "__builtin_ve_vl_vfnmsbs_vsvvl",
+    "llvm.ve.vl.vfnmsbs.vsvvmvl" => "__builtin_ve_vl_vfnmsbs_vsvvmvl",
+    "llvm.ve.vl.vfnmsbs.vsvvvl" => "__builtin_ve_vl_vfnmsbs_vsvvvl",
+    "llvm.ve.vl.vfnmsbs.vvsvl" => "__builtin_ve_vl_vfnmsbs_vvsvl",
+    "llvm.ve.vl.vfnmsbs.vvsvmvl" => "__builtin_ve_vl_vfnmsbs_vvsvmvl",
+    "llvm.ve.vl.vfnmsbs.vvsvvl" => "__builtin_ve_vl_vfnmsbs_vvsvvl",
+    "llvm.ve.vl.vfnmsbs.vvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvl",
+    "llvm.ve.vl.vfnmsbs.vvvvmvl" => "__builtin_ve_vl_vfnmsbs_vvvvmvl",
+    "llvm.ve.vl.vfnmsbs.vvvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvvl",
+    "llvm.ve.vl.vfrmaxdfst.vvl" => "__builtin_ve_vl_vfrmaxdfst_vvl",
+    "llvm.ve.vl.vfrmaxdfst.vvvl" => "__builtin_ve_vl_vfrmaxdfst_vvvl",
+    "llvm.ve.vl.vfrmaxdlst.vvl" => "__builtin_ve_vl_vfrmaxdlst_vvl",
+    "llvm.ve.vl.vfrmaxdlst.vvvl" => "__builtin_ve_vl_vfrmaxdlst_vvvl",
+    "llvm.ve.vl.vfrmaxsfst.vvl" => "__builtin_ve_vl_vfrmaxsfst_vvl",
+    "llvm.ve.vl.vfrmaxsfst.vvvl" => "__builtin_ve_vl_vfrmaxsfst_vvvl",
+    "llvm.ve.vl.vfrmaxslst.vvl" => "__builtin_ve_vl_vfrmaxslst_vvl",
+    "llvm.ve.vl.vfrmaxslst.vvvl" => "__builtin_ve_vl_vfrmaxslst_vvvl",
+    "llvm.ve.vl.vfrmindfst.vvl" => "__builtin_ve_vl_vfrmindfst_vvl",
+    "llvm.ve.vl.vfrmindfst.vvvl" => "__builtin_ve_vl_vfrmindfst_vvvl",
+    "llvm.ve.vl.vfrmindlst.vvl" => "__builtin_ve_vl_vfrmindlst_vvl",
+    "llvm.ve.vl.vfrmindlst.vvvl" => "__builtin_ve_vl_vfrmindlst_vvvl",
+    "llvm.ve.vl.vfrminsfst.vvl" => "__builtin_ve_vl_vfrminsfst_vvl",
+    "llvm.ve.vl.vfrminsfst.vvvl" => "__builtin_ve_vl_vfrminsfst_vvvl",
+    "llvm.ve.vl.vfrminslst.vvl" => "__builtin_ve_vl_vfrminslst_vvl",
+    "llvm.ve.vl.vfrminslst.vvvl" => "__builtin_ve_vl_vfrminslst_vvvl",
+    "llvm.ve.vl.vfsqrtd.vvl" => "__builtin_ve_vl_vfsqrtd_vvl",
+    "llvm.ve.vl.vfsqrtd.vvvl" => "__builtin_ve_vl_vfsqrtd_vvvl",
+    "llvm.ve.vl.vfsqrts.vvl" => "__builtin_ve_vl_vfsqrts_vvl",
+    "llvm.ve.vl.vfsqrts.vvvl" => "__builtin_ve_vl_vfsqrts_vvvl",
+    "llvm.ve.vl.vfsubd.vsvl" => "__builtin_ve_vl_vfsubd_vsvl",
+    "llvm.ve.vl.vfsubd.vsvmvl" => "__builtin_ve_vl_vfsubd_vsvmvl",
+    "llvm.ve.vl.vfsubd.vsvvl" => "__builtin_ve_vl_vfsubd_vsvvl",
+    "llvm.ve.vl.vfsubd.vvvl" => "__builtin_ve_vl_vfsubd_vvvl",
+    "llvm.ve.vl.vfsubd.vvvmvl" => "__builtin_ve_vl_vfsubd_vvvmvl",
+    "llvm.ve.vl.vfsubd.vvvvl" => "__builtin_ve_vl_vfsubd_vvvvl",
+    "llvm.ve.vl.vfsubs.vsvl" => "__builtin_ve_vl_vfsubs_vsvl",
+    "llvm.ve.vl.vfsubs.vsvmvl" => "__builtin_ve_vl_vfsubs_vsvmvl",
+    "llvm.ve.vl.vfsubs.vsvvl" => "__builtin_ve_vl_vfsubs_vsvvl",
+    "llvm.ve.vl.vfsubs.vvvl" => "__builtin_ve_vl_vfsubs_vvvl",
+    "llvm.ve.vl.vfsubs.vvvmvl" => "__builtin_ve_vl_vfsubs_vvvmvl",
+    "llvm.ve.vl.vfsubs.vvvvl" => "__builtin_ve_vl_vfsubs_vvvvl",
+    "llvm.ve.vl.vfsumd.vvl" => "__builtin_ve_vl_vfsumd_vvl",
+    "llvm.ve.vl.vfsumd.vvml" => "__builtin_ve_vl_vfsumd_vvml",
+    "llvm.ve.vl.vfsums.vvl" => "__builtin_ve_vl_vfsums_vvl",
+    "llvm.ve.vl.vfsums.vvml" => "__builtin_ve_vl_vfsums_vvml",
+    "llvm.ve.vl.vgt.vvssl" => "__builtin_ve_vl_vgt_vvssl",
+    "llvm.ve.vl.vgt.vvssml" => "__builtin_ve_vl_vgt_vvssml",
+    "llvm.ve.vl.vgt.vvssmvl" => "__builtin_ve_vl_vgt_vvssmvl",
+    "llvm.ve.vl.vgt.vvssvl" => "__builtin_ve_vl_vgt_vvssvl",
+    "llvm.ve.vl.vgtlsx.vvssl" => "__builtin_ve_vl_vgtlsx_vvssl",
+    "llvm.ve.vl.vgtlsx.vvssml" => "__builtin_ve_vl_vgtlsx_vvssml",
+    "llvm.ve.vl.vgtlsx.vvssmvl" => "__builtin_ve_vl_vgtlsx_vvssmvl",
+    "llvm.ve.vl.vgtlsx.vvssvl" => "__builtin_ve_vl_vgtlsx_vvssvl",
+    "llvm.ve.vl.vgtlsxnc.vvssl" => "__builtin_ve_vl_vgtlsxnc_vvssl",
+    "llvm.ve.vl.vgtlsxnc.vvssml" => "__builtin_ve_vl_vgtlsxnc_vvssml",
+    "llvm.ve.vl.vgtlsxnc.vvssmvl" => "__builtin_ve_vl_vgtlsxnc_vvssmvl",
+    "llvm.ve.vl.vgtlsxnc.vvssvl" => "__builtin_ve_vl_vgtlsxnc_vvssvl",
+    "llvm.ve.vl.vgtlzx.vvssl" => "__builtin_ve_vl_vgtlzx_vvssl",
+    "llvm.ve.vl.vgtlzx.vvssml" => "__builtin_ve_vl_vgtlzx_vvssml",
+    "llvm.ve.vl.vgtlzx.vvssmvl" => "__builtin_ve_vl_vgtlzx_vvssmvl",
+    "llvm.ve.vl.vgtlzx.vvssvl" => "__builtin_ve_vl_vgtlzx_vvssvl",
+    "llvm.ve.vl.vgtlzxnc.vvssl" => "__builtin_ve_vl_vgtlzxnc_vvssl",
+    "llvm.ve.vl.vgtlzxnc.vvssml" => "__builtin_ve_vl_vgtlzxnc_vvssml",
+    "llvm.ve.vl.vgtlzxnc.vvssmvl" => "__builtin_ve_vl_vgtlzxnc_vvssmvl",
+    "llvm.ve.vl.vgtlzxnc.vvssvl" => "__builtin_ve_vl_vgtlzxnc_vvssvl",
+    "llvm.ve.vl.vgtnc.vvssl" => "__builtin_ve_vl_vgtnc_vvssl",
+    "llvm.ve.vl.vgtnc.vvssml" => "__builtin_ve_vl_vgtnc_vvssml",
+    "llvm.ve.vl.vgtnc.vvssmvl" => "__builtin_ve_vl_vgtnc_vvssmvl",
+    "llvm.ve.vl.vgtnc.vvssvl" => "__builtin_ve_vl_vgtnc_vvssvl",
+    "llvm.ve.vl.vgtu.vvssl" => "__builtin_ve_vl_vgtu_vvssl",
+    "llvm.ve.vl.vgtu.vvssml" => "__builtin_ve_vl_vgtu_vvssml",
+    "llvm.ve.vl.vgtu.vvssmvl" => "__builtin_ve_vl_vgtu_vvssmvl",
+    "llvm.ve.vl.vgtu.vvssvl" => "__builtin_ve_vl_vgtu_vvssvl",
+    "llvm.ve.vl.vgtunc.vvssl" => "__builtin_ve_vl_vgtunc_vvssl",
+    "llvm.ve.vl.vgtunc.vvssml" => "__builtin_ve_vl_vgtunc_vvssml",
+    "llvm.ve.vl.vgtunc.vvssmvl" => "__builtin_ve_vl_vgtunc_vvssmvl",
+    "llvm.ve.vl.vgtunc.vvssvl" => "__builtin_ve_vl_vgtunc_vvssvl",
+    "llvm.ve.vl.vld.vssl" => "__builtin_ve_vl_vld_vssl",
+    "llvm.ve.vl.vld.vssvl" => "__builtin_ve_vl_vld_vssvl",
+    "llvm.ve.vl.vld2d.vssl" => "__builtin_ve_vl_vld2d_vssl",
+    "llvm.ve.vl.vld2d.vssvl" => "__builtin_ve_vl_vld2d_vssvl",
+    "llvm.ve.vl.vld2dnc.vssl" => "__builtin_ve_vl_vld2dnc_vssl",
+    "llvm.ve.vl.vld2dnc.vssvl" => "__builtin_ve_vl_vld2dnc_vssvl",
+    "llvm.ve.vl.vldl2dsx.vssl" => "__builtin_ve_vl_vldl2dsx_vssl",
+    "llvm.ve.vl.vldl2dsx.vssvl" => "__builtin_ve_vl_vldl2dsx_vssvl",
+    "llvm.ve.vl.vldl2dsxnc.vssl" => "__builtin_ve_vl_vldl2dsxnc_vssl",
+    "llvm.ve.vl.vldl2dsxnc.vssvl" => "__builtin_ve_vl_vldl2dsxnc_vssvl",
+    "llvm.ve.vl.vldl2dzx.vssl" => "__builtin_ve_vl_vldl2dzx_vssl",
+    "llvm.ve.vl.vldl2dzx.vssvl" => "__builtin_ve_vl_vldl2dzx_vssvl",
+    "llvm.ve.vl.vldl2dzxnc.vssl" => "__builtin_ve_vl_vldl2dzxnc_vssl",
+    "llvm.ve.vl.vldl2dzxnc.vssvl" => "__builtin_ve_vl_vldl2dzxnc_vssvl",
+    "llvm.ve.vl.vldlsx.vssl" => "__builtin_ve_vl_vldlsx_vssl",
+    "llvm.ve.vl.vldlsx.vssvl" => "__builtin_ve_vl_vldlsx_vssvl",
+    "llvm.ve.vl.vldlsxnc.vssl" => "__builtin_ve_vl_vldlsxnc_vssl",
+    "llvm.ve.vl.vldlsxnc.vssvl" => "__builtin_ve_vl_vldlsxnc_vssvl",
+    "llvm.ve.vl.vldlzx.vssl" => "__builtin_ve_vl_vldlzx_vssl",
+    "llvm.ve.vl.vldlzx.vssvl" => "__builtin_ve_vl_vldlzx_vssvl",
+    "llvm.ve.vl.vldlzxnc.vssl" => "__builtin_ve_vl_vldlzxnc_vssl",
+    "llvm.ve.vl.vldlzxnc.vssvl" => "__builtin_ve_vl_vldlzxnc_vssvl",
+    "llvm.ve.vl.vldnc.vssl" => "__builtin_ve_vl_vldnc_vssl",
+    "llvm.ve.vl.vldnc.vssvl" => "__builtin_ve_vl_vldnc_vssvl",
+    "llvm.ve.vl.vldu.vssl" => "__builtin_ve_vl_vldu_vssl",
+    "llvm.ve.vl.vldu.vssvl" => "__builtin_ve_vl_vldu_vssvl",
+    "llvm.ve.vl.vldu2d.vssl" => "__builtin_ve_vl_vldu2d_vssl",
+    "llvm.ve.vl.vldu2d.vssvl" => "__builtin_ve_vl_vldu2d_vssvl",
+    "llvm.ve.vl.vldu2dnc.vssl" => "__builtin_ve_vl_vldu2dnc_vssl",
+    "llvm.ve.vl.vldu2dnc.vssvl" => "__builtin_ve_vl_vldu2dnc_vssvl",
+    "llvm.ve.vl.vldunc.vssl" => "__builtin_ve_vl_vldunc_vssl",
+    "llvm.ve.vl.vldunc.vssvl" => "__builtin_ve_vl_vldunc_vssvl",
+    "llvm.ve.vl.vldz.vvl" => "__builtin_ve_vl_vldz_vvl",
+    "llvm.ve.vl.vldz.vvmvl" => "__builtin_ve_vl_vldz_vvmvl",
+    "llvm.ve.vl.vldz.vvvl" => "__builtin_ve_vl_vldz_vvvl",
+    "llvm.ve.vl.vmaxsl.vsvl" => "__builtin_ve_vl_vmaxsl_vsvl",
+    "llvm.ve.vl.vmaxsl.vsvmvl" => "__builtin_ve_vl_vmaxsl_vsvmvl",
+    "llvm.ve.vl.vmaxsl.vsvvl" => "__builtin_ve_vl_vmaxsl_vsvvl",
+    "llvm.ve.vl.vmaxsl.vvvl" => "__builtin_ve_vl_vmaxsl_vvvl",
+    "llvm.ve.vl.vmaxsl.vvvmvl" => "__builtin_ve_vl_vmaxsl_vvvmvl",
+    "llvm.ve.vl.vmaxsl.vvvvl" => "__builtin_ve_vl_vmaxsl_vvvvl",
+    "llvm.ve.vl.vmaxswsx.vsvl" => "__builtin_ve_vl_vmaxswsx_vsvl",
+    "llvm.ve.vl.vmaxswsx.vsvmvl" => "__builtin_ve_vl_vmaxswsx_vsvmvl",
+    "llvm.ve.vl.vmaxswsx.vsvvl" => "__builtin_ve_vl_vmaxswsx_vsvvl",
+    "llvm.ve.vl.vmaxswsx.vvvl" => "__builtin_ve_vl_vmaxswsx_vvvl",
+    "llvm.ve.vl.vmaxswsx.vvvmvl" => "__builtin_ve_vl_vmaxswsx_vvvmvl",
+    "llvm.ve.vl.vmaxswsx.vvvvl" => "__builtin_ve_vl_vmaxswsx_vvvvl",
+    "llvm.ve.vl.vmaxswzx.vsvl" => "__builtin_ve_vl_vmaxswzx_vsvl",
+    "llvm.ve.vl.vmaxswzx.vsvmvl" => "__builtin_ve_vl_vmaxswzx_vsvmvl",
+    "llvm.ve.vl.vmaxswzx.vsvvl" => "__builtin_ve_vl_vmaxswzx_vsvvl",
+    "llvm.ve.vl.vmaxswzx.vvvl" => "__builtin_ve_vl_vmaxswzx_vvvl",
+    "llvm.ve.vl.vmaxswzx.vvvmvl" => "__builtin_ve_vl_vmaxswzx_vvvmvl",
+    "llvm.ve.vl.vmaxswzx.vvvvl" => "__builtin_ve_vl_vmaxswzx_vvvvl",
+    "llvm.ve.vl.vminsl.vsvl" => "__builtin_ve_vl_vminsl_vsvl",
+    "llvm.ve.vl.vminsl.vsvmvl" => "__builtin_ve_vl_vminsl_vsvmvl",
+    "llvm.ve.vl.vminsl.vsvvl" => "__builtin_ve_vl_vminsl_vsvvl",
+    "llvm.ve.vl.vminsl.vvvl" => "__builtin_ve_vl_vminsl_vvvl",
+    "llvm.ve.vl.vminsl.vvvmvl" => "__builtin_ve_vl_vminsl_vvvmvl",
+    "llvm.ve.vl.vminsl.vvvvl" => "__builtin_ve_vl_vminsl_vvvvl",
+    "llvm.ve.vl.vminswsx.vsvl" => "__builtin_ve_vl_vminswsx_vsvl",
+    "llvm.ve.vl.vminswsx.vsvmvl" => "__builtin_ve_vl_vminswsx_vsvmvl",
+    "llvm.ve.vl.vminswsx.vsvvl" => "__builtin_ve_vl_vminswsx_vsvvl",
+    "llvm.ve.vl.vminswsx.vvvl" => "__builtin_ve_vl_vminswsx_vvvl",
+    "llvm.ve.vl.vminswsx.vvvmvl" => "__builtin_ve_vl_vminswsx_vvvmvl",
+    "llvm.ve.vl.vminswsx.vvvvl" => "__builtin_ve_vl_vminswsx_vvvvl",
+    "llvm.ve.vl.vminswzx.vsvl" => "__builtin_ve_vl_vminswzx_vsvl",
+    "llvm.ve.vl.vminswzx.vsvmvl" => "__builtin_ve_vl_vminswzx_vsvmvl",
+    "llvm.ve.vl.vminswzx.vsvvl" => "__builtin_ve_vl_vminswzx_vsvvl",
+    "llvm.ve.vl.vminswzx.vvvl" => "__builtin_ve_vl_vminswzx_vvvl",
+    "llvm.ve.vl.vminswzx.vvvmvl" => "__builtin_ve_vl_vminswzx_vvvmvl",
+    "llvm.ve.vl.vminswzx.vvvvl" => "__builtin_ve_vl_vminswzx_vvvvl",
+    "llvm.ve.vl.vmrg.vsvml" => "__builtin_ve_vl_vmrg_vsvml",
+    "llvm.ve.vl.vmrg.vsvmvl" => "__builtin_ve_vl_vmrg_vsvmvl",
+    "llvm.ve.vl.vmrg.vvvml" => "__builtin_ve_vl_vmrg_vvvml",
+    "llvm.ve.vl.vmrg.vvvmvl" => "__builtin_ve_vl_vmrg_vvvmvl",
+    "llvm.ve.vl.vmrgw.vsvMl" => "__builtin_ve_vl_vmrgw_vsvMl",
+    "llvm.ve.vl.vmrgw.vsvMvl" => "__builtin_ve_vl_vmrgw_vsvMvl",
+    "llvm.ve.vl.vmrgw.vvvMl" => "__builtin_ve_vl_vmrgw_vvvMl",
+    "llvm.ve.vl.vmrgw.vvvMvl" => "__builtin_ve_vl_vmrgw_vvvMvl",
+    "llvm.ve.vl.vmulsl.vsvl" => "__builtin_ve_vl_vmulsl_vsvl",
+    "llvm.ve.vl.vmulsl.vsvmvl" => "__builtin_ve_vl_vmulsl_vsvmvl",
+    "llvm.ve.vl.vmulsl.vsvvl" => "__builtin_ve_vl_vmulsl_vsvvl",
+    "llvm.ve.vl.vmulsl.vvvl" => "__builtin_ve_vl_vmulsl_vvvl",
+    "llvm.ve.vl.vmulsl.vvvmvl" => "__builtin_ve_vl_vmulsl_vvvmvl",
+    "llvm.ve.vl.vmulsl.vvvvl" => "__builtin_ve_vl_vmulsl_vvvvl",
+    "llvm.ve.vl.vmulslw.vsvl" => "__builtin_ve_vl_vmulslw_vsvl",
+    "llvm.ve.vl.vmulslw.vsvvl" => "__builtin_ve_vl_vmulslw_vsvvl",
+    "llvm.ve.vl.vmulslw.vvvl" => "__builtin_ve_vl_vmulslw_vvvl",
+    "llvm.ve.vl.vmulslw.vvvvl" => "__builtin_ve_vl_vmulslw_vvvvl",
+    "llvm.ve.vl.vmulswsx.vsvl" => "__builtin_ve_vl_vmulswsx_vsvl",
+    "llvm.ve.vl.vmulswsx.vsvmvl" => "__builtin_ve_vl_vmulswsx_vsvmvl",
+    "llvm.ve.vl.vmulswsx.vsvvl" => "__builtin_ve_vl_vmulswsx_vsvvl",
+    "llvm.ve.vl.vmulswsx.vvvl" => "__builtin_ve_vl_vmulswsx_vvvl",
+    "llvm.ve.vl.vmulswsx.vvvmvl" => "__builtin_ve_vl_vmulswsx_vvvmvl",
+    "llvm.ve.vl.vmulswsx.vvvvl" => "__builtin_ve_vl_vmulswsx_vvvvl",
+    "llvm.ve.vl.vmulswzx.vsvl" => "__builtin_ve_vl_vmulswzx_vsvl",
+    "llvm.ve.vl.vmulswzx.vsvmvl" => "__builtin_ve_vl_vmulswzx_vsvmvl",
+    "llvm.ve.vl.vmulswzx.vsvvl" => "__builtin_ve_vl_vmulswzx_vsvvl",
+    "llvm.ve.vl.vmulswzx.vvvl" => "__builtin_ve_vl_vmulswzx_vvvl",
+    "llvm.ve.vl.vmulswzx.vvvmvl" => "__builtin_ve_vl_vmulswzx_vvvmvl",
+    "llvm.ve.vl.vmulswzx.vvvvl" => "__builtin_ve_vl_vmulswzx_vvvvl",
+    "llvm.ve.vl.vmulul.vsvl" => "__builtin_ve_vl_vmulul_vsvl",
+    "llvm.ve.vl.vmulul.vsvmvl" => "__builtin_ve_vl_vmulul_vsvmvl",
+    "llvm.ve.vl.vmulul.vsvvl" => "__builtin_ve_vl_vmulul_vsvvl",
+    "llvm.ve.vl.vmulul.vvvl" => "__builtin_ve_vl_vmulul_vvvl",
+    "llvm.ve.vl.vmulul.vvvmvl" => "__builtin_ve_vl_vmulul_vvvmvl",
+    "llvm.ve.vl.vmulul.vvvvl" => "__builtin_ve_vl_vmulul_vvvvl",
+    "llvm.ve.vl.vmuluw.vsvl" => "__builtin_ve_vl_vmuluw_vsvl",
+    "llvm.ve.vl.vmuluw.vsvmvl" => "__builtin_ve_vl_vmuluw_vsvmvl",
+    "llvm.ve.vl.vmuluw.vsvvl" => "__builtin_ve_vl_vmuluw_vsvvl",
+    "llvm.ve.vl.vmuluw.vvvl" => "__builtin_ve_vl_vmuluw_vvvl",
+    "llvm.ve.vl.vmuluw.vvvmvl" => "__builtin_ve_vl_vmuluw_vvvmvl",
+    "llvm.ve.vl.vmuluw.vvvvl" => "__builtin_ve_vl_vmuluw_vvvvl",
+    "llvm.ve.vl.vmv.vsvl" => "__builtin_ve_vl_vmv_vsvl",
+    "llvm.ve.vl.vmv.vsvmvl" => "__builtin_ve_vl_vmv_vsvmvl",
+    "llvm.ve.vl.vmv.vsvvl" => "__builtin_ve_vl_vmv_vsvvl",
+    "llvm.ve.vl.vor.vsvl" => "__builtin_ve_vl_vor_vsvl",
+    "llvm.ve.vl.vor.vsvmvl" => "__builtin_ve_vl_vor_vsvmvl",
+    "llvm.ve.vl.vor.vsvvl" => "__builtin_ve_vl_vor_vsvvl",
+    "llvm.ve.vl.vor.vvvl" => "__builtin_ve_vl_vor_vvvl",
+    "llvm.ve.vl.vor.vvvmvl" => "__builtin_ve_vl_vor_vvvmvl",
+    "llvm.ve.vl.vor.vvvvl" => "__builtin_ve_vl_vor_vvvvl",
+    "llvm.ve.vl.vpcnt.vvl" => "__builtin_ve_vl_vpcnt_vvl",
+    "llvm.ve.vl.vpcnt.vvmvl" => "__builtin_ve_vl_vpcnt_vvmvl",
+    "llvm.ve.vl.vpcnt.vvvl" => "__builtin_ve_vl_vpcnt_vvvl",
+    "llvm.ve.vl.vrand.vvl" => "__builtin_ve_vl_vrand_vvl",
+    "llvm.ve.vl.vrand.vvml" => "__builtin_ve_vl_vrand_vvml",
+    "llvm.ve.vl.vrcpd.vvl" => "__builtin_ve_vl_vrcpd_vvl",
+    "llvm.ve.vl.vrcpd.vvvl" => "__builtin_ve_vl_vrcpd_vvvl",
+    "llvm.ve.vl.vrcps.vvl" => "__builtin_ve_vl_vrcps_vvl",
+    "llvm.ve.vl.vrcps.vvvl" => "__builtin_ve_vl_vrcps_vvvl",
+    "llvm.ve.vl.vrmaxslfst.vvl" => "__builtin_ve_vl_vrmaxslfst_vvl",
+    "llvm.ve.vl.vrmaxslfst.vvvl" => "__builtin_ve_vl_vrmaxslfst_vvvl",
+    "llvm.ve.vl.vrmaxsllst.vvl" => "__builtin_ve_vl_vrmaxsllst_vvl",
+    "llvm.ve.vl.vrmaxsllst.vvvl" => "__builtin_ve_vl_vrmaxsllst_vvvl",
+    "llvm.ve.vl.vrmaxswfstsx.vvl" => "__builtin_ve_vl_vrmaxswfstsx_vvl",
+    "llvm.ve.vl.vrmaxswfstsx.vvvl" => "__builtin_ve_vl_vrmaxswfstsx_vvvl",
+    "llvm.ve.vl.vrmaxswfstzx.vvl" => "__builtin_ve_vl_vrmaxswfstzx_vvl",
+    "llvm.ve.vl.vrmaxswfstzx.vvvl" => "__builtin_ve_vl_vrmaxswfstzx_vvvl",
+    "llvm.ve.vl.vrmaxswlstsx.vvl" => "__builtin_ve_vl_vrmaxswlstsx_vvl",
+    "llvm.ve.vl.vrmaxswlstsx.vvvl" => "__builtin_ve_vl_vrmaxswlstsx_vvvl",
+    "llvm.ve.vl.vrmaxswlstzx.vvl" => "__builtin_ve_vl_vrmaxswlstzx_vvl",
+    "llvm.ve.vl.vrmaxswlstzx.vvvl" => "__builtin_ve_vl_vrmaxswlstzx_vvvl",
+    "llvm.ve.vl.vrminslfst.vvl" => "__builtin_ve_vl_vrminslfst_vvl",
+    "llvm.ve.vl.vrminslfst.vvvl" => "__builtin_ve_vl_vrminslfst_vvvl",
+    "llvm.ve.vl.vrminsllst.vvl" => "__builtin_ve_vl_vrminsllst_vvl",
+    "llvm.ve.vl.vrminsllst.vvvl" => "__builtin_ve_vl_vrminsllst_vvvl",
+    "llvm.ve.vl.vrminswfstsx.vvl" => "__builtin_ve_vl_vrminswfstsx_vvl",
+    "llvm.ve.vl.vrminswfstsx.vvvl" => "__builtin_ve_vl_vrminswfstsx_vvvl",
+    "llvm.ve.vl.vrminswfstzx.vvl" => "__builtin_ve_vl_vrminswfstzx_vvl",
+    "llvm.ve.vl.vrminswfstzx.vvvl" => "__builtin_ve_vl_vrminswfstzx_vvvl",
+    "llvm.ve.vl.vrminswlstsx.vvl" => "__builtin_ve_vl_vrminswlstsx_vvl",
+    "llvm.ve.vl.vrminswlstsx.vvvl" => "__builtin_ve_vl_vrminswlstsx_vvvl",
+    "llvm.ve.vl.vrminswlstzx.vvl" => "__builtin_ve_vl_vrminswlstzx_vvl",
+    "llvm.ve.vl.vrminswlstzx.vvvl" => "__builtin_ve_vl_vrminswlstzx_vvvl",
+    "llvm.ve.vl.vror.vvl" => "__builtin_ve_vl_vror_vvl",
+    "llvm.ve.vl.vror.vvml" => "__builtin_ve_vl_vror_vvml",
+    "llvm.ve.vl.vrsqrtd.vvl" => "__builtin_ve_vl_vrsqrtd_vvl",
+    "llvm.ve.vl.vrsqrtd.vvvl" => "__builtin_ve_vl_vrsqrtd_vvvl",
+    "llvm.ve.vl.vrsqrtdnex.vvl" => "__builtin_ve_vl_vrsqrtdnex_vvl",
+    "llvm.ve.vl.vrsqrtdnex.vvvl" => "__builtin_ve_vl_vrsqrtdnex_vvvl",
+    "llvm.ve.vl.vrsqrts.vvl" => "__builtin_ve_vl_vrsqrts_vvl",
+    "llvm.ve.vl.vrsqrts.vvvl" => "__builtin_ve_vl_vrsqrts_vvvl",
+    "llvm.ve.vl.vrsqrtsnex.vvl" => "__builtin_ve_vl_vrsqrtsnex_vvl",
+    "llvm.ve.vl.vrsqrtsnex.vvvl" => "__builtin_ve_vl_vrsqrtsnex_vvvl",
+    "llvm.ve.vl.vrxor.vvl" => "__builtin_ve_vl_vrxor_vvl",
+    "llvm.ve.vl.vrxor.vvml" => "__builtin_ve_vl_vrxor_vvml",
+    "llvm.ve.vl.vsc.vvssl" => "__builtin_ve_vl_vsc_vvssl",
+    "llvm.ve.vl.vsc.vvssml" => "__builtin_ve_vl_vsc_vvssml",
+    "llvm.ve.vl.vscl.vvssl" => "__builtin_ve_vl_vscl_vvssl",
+    "llvm.ve.vl.vscl.vvssml" => "__builtin_ve_vl_vscl_vvssml",
+    "llvm.ve.vl.vsclnc.vvssl" => "__builtin_ve_vl_vsclnc_vvssl",
+    "llvm.ve.vl.vsclnc.vvssml" => "__builtin_ve_vl_vsclnc_vvssml",
+    "llvm.ve.vl.vsclncot.vvssl" => "__builtin_ve_vl_vsclncot_vvssl",
+    "llvm.ve.vl.vsclncot.vvssml" => "__builtin_ve_vl_vsclncot_vvssml",
+    "llvm.ve.vl.vsclot.vvssl" => "__builtin_ve_vl_vsclot_vvssl",
+    "llvm.ve.vl.vsclot.vvssml" => "__builtin_ve_vl_vsclot_vvssml",
+    "llvm.ve.vl.vscnc.vvssl" => "__builtin_ve_vl_vscnc_vvssl",
+    "llvm.ve.vl.vscnc.vvssml" => "__builtin_ve_vl_vscnc_vvssml",
+    "llvm.ve.vl.vscncot.vvssl" => "__builtin_ve_vl_vscncot_vvssl",
+    "llvm.ve.vl.vscncot.vvssml" => "__builtin_ve_vl_vscncot_vvssml",
+    "llvm.ve.vl.vscot.vvssl" => "__builtin_ve_vl_vscot_vvssl",
+    "llvm.ve.vl.vscot.vvssml" => "__builtin_ve_vl_vscot_vvssml",
+    "llvm.ve.vl.vscu.vvssl" => "__builtin_ve_vl_vscu_vvssl",
+    "llvm.ve.vl.vscu.vvssml" => "__builtin_ve_vl_vscu_vvssml",
+    "llvm.ve.vl.vscunc.vvssl" => "__builtin_ve_vl_vscunc_vvssl",
+    "llvm.ve.vl.vscunc.vvssml" => "__builtin_ve_vl_vscunc_vvssml",
+    "llvm.ve.vl.vscuncot.vvssl" => "__builtin_ve_vl_vscuncot_vvssl",
+    "llvm.ve.vl.vscuncot.vvssml" => "__builtin_ve_vl_vscuncot_vvssml",
+    "llvm.ve.vl.vscuot.vvssl" => "__builtin_ve_vl_vscuot_vvssl",
+    "llvm.ve.vl.vscuot.vvssml" => "__builtin_ve_vl_vscuot_vvssml",
+    "llvm.ve.vl.vseq.vl" => "__builtin_ve_vl_vseq_vl",
+    "llvm.ve.vl.vseq.vvl" => "__builtin_ve_vl_vseq_vvl",
+    "llvm.ve.vl.vsfa.vvssl" => "__builtin_ve_vl_vsfa_vvssl",
+    "llvm.ve.vl.vsfa.vvssmvl" => "__builtin_ve_vl_vsfa_vvssmvl",
+    "llvm.ve.vl.vsfa.vvssvl" => "__builtin_ve_vl_vsfa_vvssvl",
+    "llvm.ve.vl.vshf.vvvsl" => "__builtin_ve_vl_vshf_vvvsl",
+    "llvm.ve.vl.vshf.vvvsvl" => "__builtin_ve_vl_vshf_vvvsvl",
+    "llvm.ve.vl.vslal.vvsl" => "__builtin_ve_vl_vslal_vvsl",
+    "llvm.ve.vl.vslal.vvsmvl" => "__builtin_ve_vl_vslal_vvsmvl",
+    "llvm.ve.vl.vslal.vvsvl" => "__builtin_ve_vl_vslal_vvsvl",
+    "llvm.ve.vl.vslal.vvvl" => "__builtin_ve_vl_vslal_vvvl",
+    "llvm.ve.vl.vslal.vvvmvl" => "__builtin_ve_vl_vslal_vvvmvl",
+    "llvm.ve.vl.vslal.vvvvl" => "__builtin_ve_vl_vslal_vvvvl",
+    "llvm.ve.vl.vslawsx.vvsl" => "__builtin_ve_vl_vslawsx_vvsl",
+    "llvm.ve.vl.vslawsx.vvsmvl" => "__builtin_ve_vl_vslawsx_vvsmvl",
+    "llvm.ve.vl.vslawsx.vvsvl" => "__builtin_ve_vl_vslawsx_vvsvl",
+    "llvm.ve.vl.vslawsx.vvvl" => "__builtin_ve_vl_vslawsx_vvvl",
+    "llvm.ve.vl.vslawsx.vvvmvl" => "__builtin_ve_vl_vslawsx_vvvmvl",
+    "llvm.ve.vl.vslawsx.vvvvl" => "__builtin_ve_vl_vslawsx_vvvvl",
+    "llvm.ve.vl.vslawzx.vvsl" => "__builtin_ve_vl_vslawzx_vvsl",
+    "llvm.ve.vl.vslawzx.vvsmvl" => "__builtin_ve_vl_vslawzx_vvsmvl",
+    "llvm.ve.vl.vslawzx.vvsvl" => "__builtin_ve_vl_vslawzx_vvsvl",
+    "llvm.ve.vl.vslawzx.vvvl" => "__builtin_ve_vl_vslawzx_vvvl",
+    "llvm.ve.vl.vslawzx.vvvmvl" => "__builtin_ve_vl_vslawzx_vvvmvl",
+    "llvm.ve.vl.vslawzx.vvvvl" => "__builtin_ve_vl_vslawzx_vvvvl",
+    "llvm.ve.vl.vsll.vvsl" => "__builtin_ve_vl_vsll_vvsl",
+    "llvm.ve.vl.vsll.vvsmvl" => "__builtin_ve_vl_vsll_vvsmvl",
+    "llvm.ve.vl.vsll.vvsvl" => "__builtin_ve_vl_vsll_vvsvl",
+    "llvm.ve.vl.vsll.vvvl" => "__builtin_ve_vl_vsll_vvvl",
+    "llvm.ve.vl.vsll.vvvmvl" => "__builtin_ve_vl_vsll_vvvmvl",
+    "llvm.ve.vl.vsll.vvvvl" => "__builtin_ve_vl_vsll_vvvvl",
+    "llvm.ve.vl.vsral.vvsl" => "__builtin_ve_vl_vsral_vvsl",
+    "llvm.ve.vl.vsral.vvsmvl" => "__builtin_ve_vl_vsral_vvsmvl",
+    "llvm.ve.vl.vsral.vvsvl" => "__builtin_ve_vl_vsral_vvsvl",
+    "llvm.ve.vl.vsral.vvvl" => "__builtin_ve_vl_vsral_vvvl",
+    "llvm.ve.vl.vsral.vvvmvl" => "__builtin_ve_vl_vsral_vvvmvl",
+    "llvm.ve.vl.vsral.vvvvl" => "__builtin_ve_vl_vsral_vvvvl",
+    "llvm.ve.vl.vsrawsx.vvsl" => "__builtin_ve_vl_vsrawsx_vvsl",
+    "llvm.ve.vl.vsrawsx.vvsmvl" => "__builtin_ve_vl_vsrawsx_vvsmvl",
+    "llvm.ve.vl.vsrawsx.vvsvl" => "__builtin_ve_vl_vsrawsx_vvsvl",
+    "llvm.ve.vl.vsrawsx.vvvl" => "__builtin_ve_vl_vsrawsx_vvvl",
+    "llvm.ve.vl.vsrawsx.vvvmvl" => "__builtin_ve_vl_vsrawsx_vvvmvl",
+    "llvm.ve.vl.vsrawsx.vvvvl" => "__builtin_ve_vl_vsrawsx_vvvvl",
+    "llvm.ve.vl.vsrawzx.vvsl" => "__builtin_ve_vl_vsrawzx_vvsl",
+    "llvm.ve.vl.vsrawzx.vvsmvl" => "__builtin_ve_vl_vsrawzx_vvsmvl",
+    "llvm.ve.vl.vsrawzx.vvsvl" => "__builtin_ve_vl_vsrawzx_vvsvl",
+    "llvm.ve.vl.vsrawzx.vvvl" => "__builtin_ve_vl_vsrawzx_vvvl",
+    "llvm.ve.vl.vsrawzx.vvvmvl" => "__builtin_ve_vl_vsrawzx_vvvmvl",
+    "llvm.ve.vl.vsrawzx.vvvvl" => "__builtin_ve_vl_vsrawzx_vvvvl",
+    "llvm.ve.vl.vsrl.vvsl" => "__builtin_ve_vl_vsrl_vvsl",
+    "llvm.ve.vl.vsrl.vvsmvl" => "__builtin_ve_vl_vsrl_vvsmvl",
+    "llvm.ve.vl.vsrl.vvsvl" => "__builtin_ve_vl_vsrl_vvsvl",
+    "llvm.ve.vl.vsrl.vvvl" => "__builtin_ve_vl_vsrl_vvvl",
+    "llvm.ve.vl.vsrl.vvvmvl" => "__builtin_ve_vl_vsrl_vvvmvl",
+    "llvm.ve.vl.vsrl.vvvvl" => "__builtin_ve_vl_vsrl_vvvvl",
+    "llvm.ve.vl.vst.vssl" => "__builtin_ve_vl_vst_vssl",
+    "llvm.ve.vl.vst.vssml" => "__builtin_ve_vl_vst_vssml",
+    "llvm.ve.vl.vst2d.vssl" => "__builtin_ve_vl_vst2d_vssl",
+    "llvm.ve.vl.vst2d.vssml" => "__builtin_ve_vl_vst2d_vssml",
+    "llvm.ve.vl.vst2dnc.vssl" => "__builtin_ve_vl_vst2dnc_vssl",
+    "llvm.ve.vl.vst2dnc.vssml" => "__builtin_ve_vl_vst2dnc_vssml",
+    "llvm.ve.vl.vst2dncot.vssl" => "__builtin_ve_vl_vst2dncot_vssl",
+    "llvm.ve.vl.vst2dncot.vssml" => "__builtin_ve_vl_vst2dncot_vssml",
+    "llvm.ve.vl.vst2dot.vssl" => "__builtin_ve_vl_vst2dot_vssl",
+    "llvm.ve.vl.vst2dot.vssml" => "__builtin_ve_vl_vst2dot_vssml",
+    "llvm.ve.vl.vstl.vssl" => "__builtin_ve_vl_vstl_vssl",
+    "llvm.ve.vl.vstl.vssml" => "__builtin_ve_vl_vstl_vssml",
+    "llvm.ve.vl.vstl2d.vssl" => "__builtin_ve_vl_vstl2d_vssl",
+    "llvm.ve.vl.vstl2d.vssml" => "__builtin_ve_vl_vstl2d_vssml",
+    "llvm.ve.vl.vstl2dnc.vssl" => "__builtin_ve_vl_vstl2dnc_vssl",
+    "llvm.ve.vl.vstl2dnc.vssml" => "__builtin_ve_vl_vstl2dnc_vssml",
+    "llvm.ve.vl.vstl2dncot.vssl" => "__builtin_ve_vl_vstl2dncot_vssl",
+    "llvm.ve.vl.vstl2dncot.vssml" => "__builtin_ve_vl_vstl2dncot_vssml",
+    "llvm.ve.vl.vstl2dot.vssl" => "__builtin_ve_vl_vstl2dot_vssl",
+    "llvm.ve.vl.vstl2dot.vssml" => "__builtin_ve_vl_vstl2dot_vssml",
+    "llvm.ve.vl.vstlnc.vssl" => "__builtin_ve_vl_vstlnc_vssl",
+    "llvm.ve.vl.vstlnc.vssml" => "__builtin_ve_vl_vstlnc_vssml",
+    "llvm.ve.vl.vstlncot.vssl" => "__builtin_ve_vl_vstlncot_vssl",
+    "llvm.ve.vl.vstlncot.vssml" => "__builtin_ve_vl_vstlncot_vssml",
+    "llvm.ve.vl.vstlot.vssl" => "__builtin_ve_vl_vstlot_vssl",
+    "llvm.ve.vl.vstlot.vssml" => "__builtin_ve_vl_vstlot_vssml",
+    "llvm.ve.vl.vstnc.vssl" => "__builtin_ve_vl_vstnc_vssl",
+    "llvm.ve.vl.vstnc.vssml" => "__builtin_ve_vl_vstnc_vssml",
+    "llvm.ve.vl.vstncot.vssl" => "__builtin_ve_vl_vstncot_vssl",
+    "llvm.ve.vl.vstncot.vssml" => "__builtin_ve_vl_vstncot_vssml",
+    "llvm.ve.vl.vstot.vssl" => "__builtin_ve_vl_vstot_vssl",
+    "llvm.ve.vl.vstot.vssml" => "__builtin_ve_vl_vstot_vssml",
+    "llvm.ve.vl.vstu.vssl" => "__builtin_ve_vl_vstu_vssl",
+    "llvm.ve.vl.vstu.vssml" => "__builtin_ve_vl_vstu_vssml",
+    "llvm.ve.vl.vstu2d.vssl" => "__builtin_ve_vl_vstu2d_vssl",
+    "llvm.ve.vl.vstu2d.vssml" => "__builtin_ve_vl_vstu2d_vssml",
+    "llvm.ve.vl.vstu2dnc.vssl" => "__builtin_ve_vl_vstu2dnc_vssl",
+    "llvm.ve.vl.vstu2dnc.vssml" => "__builtin_ve_vl_vstu2dnc_vssml",
+    "llvm.ve.vl.vstu2dncot.vssl" => "__builtin_ve_vl_vstu2dncot_vssl",
+    "llvm.ve.vl.vstu2dncot.vssml" => "__builtin_ve_vl_vstu2dncot_vssml",
+    "llvm.ve.vl.vstu2dot.vssl" => "__builtin_ve_vl_vstu2dot_vssl",
+    "llvm.ve.vl.vstu2dot.vssml" => "__builtin_ve_vl_vstu2dot_vssml",
+    "llvm.ve.vl.vstunc.vssl" => "__builtin_ve_vl_vstunc_vssl",
+    "llvm.ve.vl.vstunc.vssml" => "__builtin_ve_vl_vstunc_vssml",
+    "llvm.ve.vl.vstuncot.vssl" => "__builtin_ve_vl_vstuncot_vssl",
+    "llvm.ve.vl.vstuncot.vssml" => "__builtin_ve_vl_vstuncot_vssml",
+    "llvm.ve.vl.vstuot.vssl" => "__builtin_ve_vl_vstuot_vssl",
+    "llvm.ve.vl.vstuot.vssml" => "__builtin_ve_vl_vstuot_vssml",
+    "llvm.ve.vl.vsubsl.vsvl" => "__builtin_ve_vl_vsubsl_vsvl",
+    "llvm.ve.vl.vsubsl.vsvmvl" => "__builtin_ve_vl_vsubsl_vsvmvl",
+    "llvm.ve.vl.vsubsl.vsvvl" => "__builtin_ve_vl_vsubsl_vsvvl",
+    "llvm.ve.vl.vsubsl.vvvl" => "__builtin_ve_vl_vsubsl_vvvl",
+    "llvm.ve.vl.vsubsl.vvvmvl" => "__builtin_ve_vl_vsubsl_vvvmvl",
+    "llvm.ve.vl.vsubsl.vvvvl" => "__builtin_ve_vl_vsubsl_vvvvl",
+    "llvm.ve.vl.vsubswsx.vsvl" => "__builtin_ve_vl_vsubswsx_vsvl",
+    "llvm.ve.vl.vsubswsx.vsvmvl" => "__builtin_ve_vl_vsubswsx_vsvmvl",
+    "llvm.ve.vl.vsubswsx.vsvvl" => "__builtin_ve_vl_vsubswsx_vsvvl",
+    "llvm.ve.vl.vsubswsx.vvvl" => "__builtin_ve_vl_vsubswsx_vvvl",
+    "llvm.ve.vl.vsubswsx.vvvmvl" => "__builtin_ve_vl_vsubswsx_vvvmvl",
+    "llvm.ve.vl.vsubswsx.vvvvl" => "__builtin_ve_vl_vsubswsx_vvvvl",
+    "llvm.ve.vl.vsubswzx.vsvl" => "__builtin_ve_vl_vsubswzx_vsvl",
+    "llvm.ve.vl.vsubswzx.vsvmvl" => "__builtin_ve_vl_vsubswzx_vsvmvl",
+    "llvm.ve.vl.vsubswzx.vsvvl" => "__builtin_ve_vl_vsubswzx_vsvvl",
+    "llvm.ve.vl.vsubswzx.vvvl" => "__builtin_ve_vl_vsubswzx_vvvl",
+    "llvm.ve.vl.vsubswzx.vvvmvl" => "__builtin_ve_vl_vsubswzx_vvvmvl",
+    "llvm.ve.vl.vsubswzx.vvvvl" => "__builtin_ve_vl_vsubswzx_vvvvl",
+    "llvm.ve.vl.vsubul.vsvl" => "__builtin_ve_vl_vsubul_vsvl",
+    "llvm.ve.vl.vsubul.vsvmvl" => "__builtin_ve_vl_vsubul_vsvmvl",
+    "llvm.ve.vl.vsubul.vsvvl" => "__builtin_ve_vl_vsubul_vsvvl",
+    "llvm.ve.vl.vsubul.vvvl" => "__builtin_ve_vl_vsubul_vvvl",
+    "llvm.ve.vl.vsubul.vvvmvl" => "__builtin_ve_vl_vsubul_vvvmvl",
+    "llvm.ve.vl.vsubul.vvvvl" => "__builtin_ve_vl_vsubul_vvvvl",
+    "llvm.ve.vl.vsubuw.vsvl" => "__builtin_ve_vl_vsubuw_vsvl",
+    "llvm.ve.vl.vsubuw.vsvmvl" => "__builtin_ve_vl_vsubuw_vsvmvl",
+    "llvm.ve.vl.vsubuw.vsvvl" => "__builtin_ve_vl_vsubuw_vsvvl",
+    "llvm.ve.vl.vsubuw.vvvl" => "__builtin_ve_vl_vsubuw_vvvl",
+    "llvm.ve.vl.vsubuw.vvvmvl" => "__builtin_ve_vl_vsubuw_vvvmvl",
+    "llvm.ve.vl.vsubuw.vvvvl" => "__builtin_ve_vl_vsubuw_vvvvl",
+    "llvm.ve.vl.vsuml.vvl" => "__builtin_ve_vl_vsuml_vvl",
+    "llvm.ve.vl.vsuml.vvml" => "__builtin_ve_vl_vsuml_vvml",
+    "llvm.ve.vl.vsumwsx.vvl" => "__builtin_ve_vl_vsumwsx_vvl",
+    "llvm.ve.vl.vsumwsx.vvml" => "__builtin_ve_vl_vsumwsx_vvml",
+    "llvm.ve.vl.vsumwzx.vvl" => "__builtin_ve_vl_vsumwzx_vvl",
+    "llvm.ve.vl.vsumwzx.vvml" => "__builtin_ve_vl_vsumwzx_vvml",
+    "llvm.ve.vl.vxor.vsvl" => "__builtin_ve_vl_vxor_vsvl",
+    "llvm.ve.vl.vxor.vsvmvl" => "__builtin_ve_vl_vxor_vsvmvl",
+    "llvm.ve.vl.vxor.vsvvl" => "__builtin_ve_vl_vxor_vsvvl",
+    "llvm.ve.vl.vxor.vvvl" => "__builtin_ve_vl_vxor_vvvl",
+    "llvm.ve.vl.vxor.vvvmvl" => "__builtin_ve_vl_vxor_vvvmvl",
+    "llvm.ve.vl.vxor.vvvvl" => "__builtin_ve_vl_vxor_vvvvl",
+    "llvm.ve.vl.xorm.MMM" => "__builtin_ve_vl_xorm_MMM",
+    "llvm.ve.vl.xorm.mmm" => "__builtin_ve_vl_xorm_mmm",
+    // x86
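+    // NOTE: as in the sections above, each entry maps an LLVM intrinsic name
+    // to its GCC builtin counterpart. Entries commented out with
+    // [INVALID CONVERSION] appear to be mappings that could not be translated
+    // one-to-one (presumably because the GCC builtin has a different
+    // signature, e.g. the `_round`/`_mask` variants); [DUPLICATE] marks an
+    // alternative builtin for an LLVM name already mapped on the line above.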
+    "llvm.x86.aadd32" => "__builtin_ia32_aadd32",
+    "llvm.x86.aadd64" => "__builtin_ia32_aadd64",
+    "llvm.x86.aand32" => "__builtin_ia32_aand32",
+    "llvm.x86.aand64" => "__builtin_ia32_aand64",
+    "llvm.x86.addcarry.u32" => "__builtin_ia32_addcarry_u32",
+    "llvm.x86.addcarry.u64" => "__builtin_ia32_addcarry_u64",
+    "llvm.x86.addcarryx.u32" => "__builtin_ia32_addcarryx_u32",
+    "llvm.x86.addcarryx.u64" => "__builtin_ia32_addcarryx_u64",
+    "llvm.x86.aesni.aesdec" => "__builtin_ia32_aesdec128",
+    "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_aesdec256",
+    "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_aesdec512",
+    "llvm.x86.aesni.aesdeclast" => "__builtin_ia32_aesdeclast128",
+    "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_aesdeclast256",
+    "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_aesdeclast512",
+    "llvm.x86.aesni.aesenc" => "__builtin_ia32_aesenc128",
+    "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_aesenc256",
+    "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_aesenc512",
+    "llvm.x86.aesni.aesenclast" => "__builtin_ia32_aesenclast128",
+    "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_aesenclast256",
+    "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_aesenclast512",
+    "llvm.x86.aesni.aesimc" => "__builtin_ia32_aesimc128",
+    "llvm.x86.aesni.aeskeygenassist" => "__builtin_ia32_aeskeygenassist128",
+    "llvm.x86.aor32" => "__builtin_ia32_aor32",
+    "llvm.x86.aor64" => "__builtin_ia32_aor64",
+    "llvm.x86.avx.addsub.pd.256" => "__builtin_ia32_addsubpd256",
+    "llvm.x86.avx.addsub.ps.256" => "__builtin_ia32_addsubps256",
+    "llvm.x86.avx.blend.pd.256" => "__builtin_ia32_blendpd256",
+    "llvm.x86.avx.blend.ps.256" => "__builtin_ia32_blendps256",
+    "llvm.x86.avx.blendv.pd.256" => "__builtin_ia32_blendvpd256",
+    "llvm.x86.avx.blendv.ps.256" => "__builtin_ia32_blendvps256",
+    "llvm.x86.avx.cmp.pd.256" => "__builtin_ia32_cmppd256",
+    "llvm.x86.avx.cmp.ps.256" => "__builtin_ia32_cmpps256",
+    "llvm.x86.avx.cvt.pd2.ps.256" => "__builtin_ia32_cvtpd2ps256",
+    "llvm.x86.avx.cvt.pd2dq.256" => "__builtin_ia32_cvtpd2dq256",
+    "llvm.x86.avx.cvt.ps2.pd.256" => "__builtin_ia32_cvtps2pd256",
+    "llvm.x86.avx.cvt.ps2dq.256" => "__builtin_ia32_cvtps2dq256",
+    "llvm.x86.avx.cvtdq2.pd.256" => "__builtin_ia32_cvtdq2pd256",
+    "llvm.x86.avx.cvtdq2.ps.256" => "__builtin_ia32_cvtdq2ps256",
+    "llvm.x86.avx.cvtt.pd2dq.256" => "__builtin_ia32_cvttpd2dq256",
+    "llvm.x86.avx.cvtt.ps2dq.256" => "__builtin_ia32_cvttps2dq256",
+    "llvm.x86.avx.dp.ps.256" => "__builtin_ia32_dpps256",
+    "llvm.x86.avx.hadd.pd.256" => "__builtin_ia32_haddpd256",
+    "llvm.x86.avx.hadd.ps.256" => "__builtin_ia32_haddps256",
+    "llvm.x86.avx.hsub.pd.256" => "__builtin_ia32_hsubpd256",
+    "llvm.x86.avx.hsub.ps.256" => "__builtin_ia32_hsubps256",
+    "llvm.x86.avx.ldu.dq.256" => "__builtin_ia32_lddqu256",
+    "llvm.x86.avx.maskload.pd" => "__builtin_ia32_maskloadpd",
+    "llvm.x86.avx.maskload.pd.256" => "__builtin_ia32_maskloadpd256",
+    "llvm.x86.avx.maskload.ps" => "__builtin_ia32_maskloadps",
+    "llvm.x86.avx.maskload.ps.256" => "__builtin_ia32_maskloadps256",
+    "llvm.x86.avx.maskstore.pd" => "__builtin_ia32_maskstorepd",
+    "llvm.x86.avx.maskstore.pd.256" => "__builtin_ia32_maskstorepd256",
+    "llvm.x86.avx.maskstore.ps" => "__builtin_ia32_maskstoreps",
+    "llvm.x86.avx.maskstore.ps.256" => "__builtin_ia32_maskstoreps256",
+    "llvm.x86.avx.max.pd.256" => "__builtin_ia32_maxpd256",
+    "llvm.x86.avx.max.ps.256" => "__builtin_ia32_maxps256",
+    "llvm.x86.avx.min.pd.256" => "__builtin_ia32_minpd256",
+    "llvm.x86.avx.min.ps.256" => "__builtin_ia32_minps256",
+    "llvm.x86.avx.movmsk.pd.256" => "__builtin_ia32_movmskpd256",
+    "llvm.x86.avx.movmsk.ps.256" => "__builtin_ia32_movmskps256",
+    "llvm.x86.avx.ptestc.256" => "__builtin_ia32_ptestc256",
+    "llvm.x86.avx.ptestnzc.256" => "__builtin_ia32_ptestnzc256",
+    "llvm.x86.avx.ptestz.256" => "__builtin_ia32_ptestz256",
+    "llvm.x86.avx.rcp.ps.256" => "__builtin_ia32_rcpps256",
+    "llvm.x86.avx.round.pd.256" => "__builtin_ia32_roundpd256",
+    "llvm.x86.avx.round.ps.256" => "__builtin_ia32_roundps256",
+    "llvm.x86.avx.rsqrt.ps.256" => "__builtin_ia32_rsqrtps256",
+    "llvm.x86.avx.sqrt.pd.256" => "__builtin_ia32_sqrtpd256",
+    "llvm.x86.avx.sqrt.ps.256" => "__builtin_ia32_sqrtps256",
+    "llvm.x86.avx.storeu.dq.256" => "__builtin_ia32_storedqu256",
+    "llvm.x86.avx.storeu.pd.256" => "__builtin_ia32_storeupd256",
+    "llvm.x86.avx.storeu.ps.256" => "__builtin_ia32_storeups256",
+    "llvm.x86.avx.vbroadcastf128.pd.256" => "__builtin_ia32_vbroadcastf128_pd256",
+    "llvm.x86.avx.vbroadcastf128.ps.256" => "__builtin_ia32_vbroadcastf128_ps256",
+    "llvm.x86.avx.vextractf128.pd.256" => "__builtin_ia32_vextractf128_pd256",
+    "llvm.x86.avx.vextractf128.ps.256" => "__builtin_ia32_vextractf128_ps256",
+    "llvm.x86.avx.vextractf128.si.256" => "__builtin_ia32_vextractf128_si256",
+    "llvm.x86.avx.vinsertf128.pd.256" => "__builtin_ia32_vinsertf128_pd256",
+    "llvm.x86.avx.vinsertf128.ps.256" => "__builtin_ia32_vinsertf128_ps256",
+    "llvm.x86.avx.vinsertf128.si.256" => "__builtin_ia32_vinsertf128_si256",
+    "llvm.x86.avx.vperm2f128.pd.256" => "__builtin_ia32_vperm2f128_pd256",
+    "llvm.x86.avx.vperm2f128.ps.256" => "__builtin_ia32_vperm2f128_ps256",
+    "llvm.x86.avx.vperm2f128.si.256" => "__builtin_ia32_vperm2f128_si256",
+    "llvm.x86.avx.vpermilvar.pd" => "__builtin_ia32_vpermilvarpd",
+    "llvm.x86.avx.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256",
+    "llvm.x86.avx.vpermilvar.ps" => "__builtin_ia32_vpermilvarps",
+    "llvm.x86.avx.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256",
+    "llvm.x86.avx.vtestc.pd" => "__builtin_ia32_vtestcpd",
+    "llvm.x86.avx.vtestc.pd.256" => "__builtin_ia32_vtestcpd256",
+    "llvm.x86.avx.vtestc.ps" => "__builtin_ia32_vtestcps",
+    "llvm.x86.avx.vtestc.ps.256" => "__builtin_ia32_vtestcps256",
+    "llvm.x86.avx.vtestnzc.pd" => "__builtin_ia32_vtestnzcpd",
+    "llvm.x86.avx.vtestnzc.pd.256" => "__builtin_ia32_vtestnzcpd256",
+    "llvm.x86.avx.vtestnzc.ps" => "__builtin_ia32_vtestnzcps",
+    "llvm.x86.avx.vtestnzc.ps.256" => "__builtin_ia32_vtestnzcps256",
+    "llvm.x86.avx.vtestz.pd" => "__builtin_ia32_vtestzpd",
+    "llvm.x86.avx.vtestz.pd.256" => "__builtin_ia32_vtestzpd256",
+    "llvm.x86.avx.vtestz.ps" => "__builtin_ia32_vtestzps",
+    "llvm.x86.avx.vtestz.ps.256" => "__builtin_ia32_vtestzps256",
+    "llvm.x86.avx.vzeroall" => "__builtin_ia32_vzeroall",
+    "llvm.x86.avx.vzeroupper" => "__builtin_ia32_vzeroupper",
+    "llvm.x86.avx10.mask.vcvt2ps2phx.128" => "__builtin_ia32_vcvt2ps2phx128_mask",
+    "llvm.x86.avx10.mask.vcvt2ps2phx.256" => "__builtin_ia32_vcvt2ps2phx256_mask",
+    "llvm.x86.avx10.mask.vcvt2ps2phx.512" => "__builtin_ia32_vcvt2ps2phx512_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2bf8128" => "__builtin_ia32_vcvtbiasph2bf8_128_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2bf8256" => "__builtin_ia32_vcvtbiasph2bf8_256_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2bf8512" => "__builtin_ia32_vcvtbiasph2bf8_512_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2bf8s128" => "__builtin_ia32_vcvtbiasph2bf8s_128_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2bf8s256" => "__builtin_ia32_vcvtbiasph2bf8s_256_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2bf8s512" => "__builtin_ia32_vcvtbiasph2bf8s_512_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2hf8128" => "__builtin_ia32_vcvtbiasph2hf8_128_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2hf8256" => "__builtin_ia32_vcvtbiasph2hf8_256_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2hf8512" => "__builtin_ia32_vcvtbiasph2hf8_512_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2hf8s128" => "__builtin_ia32_vcvtbiasph2hf8s_128_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2hf8s256" => "__builtin_ia32_vcvtbiasph2hf8s_256_mask",
+    "llvm.x86.avx10.mask.vcvtbiasph2hf8s512" => "__builtin_ia32_vcvtbiasph2hf8s_512_mask",
+    "llvm.x86.avx10.mask.vcvthf82ph128" => "__builtin_ia32_vcvthf8_2ph128_mask",
+    "llvm.x86.avx10.mask.vcvthf82ph256" => "__builtin_ia32_vcvthf8_2ph256_mask",
+    "llvm.x86.avx10.mask.vcvthf82ph512" => "__builtin_ia32_vcvthf8_2ph512_mask",
+    "llvm.x86.avx10.mask.vcvtneph2bf8128" => "__builtin_ia32_vcvtneph2bf8_128_mask",
+    "llvm.x86.avx10.mask.vcvtneph2bf8256" => "__builtin_ia32_vcvtneph2bf8_256_mask",
+    "llvm.x86.avx10.mask.vcvtneph2bf8512" => "__builtin_ia32_vcvtneph2bf8_512_mask",
+    "llvm.x86.avx10.mask.vcvtneph2bf8s128" => "__builtin_ia32_vcvtneph2bf8s_128_mask",
+    "llvm.x86.avx10.mask.vcvtneph2bf8s256" => "__builtin_ia32_vcvtneph2bf8s_256_mask",
+    "llvm.x86.avx10.mask.vcvtneph2bf8s512" => "__builtin_ia32_vcvtneph2bf8s_512_mask",
+    "llvm.x86.avx10.mask.vcvtneph2hf8128" => "__builtin_ia32_vcvtneph2hf8_128_mask",
+    "llvm.x86.avx10.mask.vcvtneph2hf8256" => "__builtin_ia32_vcvtneph2hf8_256_mask",
+    "llvm.x86.avx10.mask.vcvtneph2hf8512" => "__builtin_ia32_vcvtneph2hf8_512_mask",
+    "llvm.x86.avx10.mask.vcvtneph2hf8s128" => "__builtin_ia32_vcvtneph2hf8s_128_mask",
+    "llvm.x86.avx10.mask.vcvtneph2hf8s256" => "__builtin_ia32_vcvtneph2hf8s_256_mask",
+    "llvm.x86.avx10.mask.vcvtneph2hf8s512" => "__builtin_ia32_vcvtneph2hf8s_512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2dq256" => "__builtin_ia32_vcvtpd2dq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2ph256" => "__builtin_ia32_vcvtpd2ph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2ps256" => "__builtin_ia32_vcvtpd2ps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2qq256" => "__builtin_ia32_vcvtpd2qq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2udq256" => "__builtin_ia32_vcvtpd2udq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtpd2uqq256" => "__builtin_ia32_vcvtpd2uqq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2dq256" => "__builtin_ia32_vcvtph2dq256_round_mask",
+    "llvm.x86.avx10.mask.vcvtph2ibs128" => "__builtin_ia32_vcvtph2ibs128_mask",
+    "llvm.x86.avx10.mask.vcvtph2ibs256" => "__builtin_ia32_vcvtph2ibs256_mask",
+    "llvm.x86.avx10.mask.vcvtph2ibs512" => "__builtin_ia32_vcvtph2ibs512_mask",
+    "llvm.x86.avx10.mask.vcvtph2iubs128" => "__builtin_ia32_vcvtph2iubs128_mask",
+    "llvm.x86.avx10.mask.vcvtph2iubs256" => "__builtin_ia32_vcvtph2iubs256_mask",
+    "llvm.x86.avx10.mask.vcvtph2iubs512" => "__builtin_ia32_vcvtph2iubs512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2pd256" => "__builtin_ia32_vcvtph2pd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2psx256" => "__builtin_ia32_vcvtph2psx256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2qq256" => "__builtin_ia32_vcvtph2qq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2udq256" => "__builtin_ia32_vcvtph2udq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2uqq256" => "__builtin_ia32_vcvtph2uqq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2uw256" => "__builtin_ia32_vcvtph2uw256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtph2w256" => "__builtin_ia32_vcvtph2w256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2dq256" => "__builtin_ia32_vcvtps2dq256_round_mask",
+    "llvm.x86.avx10.mask.vcvtps2ibs128" => "__builtin_ia32_vcvtps2ibs128_mask",
+    "llvm.x86.avx10.mask.vcvtps2ibs256" => "__builtin_ia32_vcvtps2ibs256_mask",
+    "llvm.x86.avx10.mask.vcvtps2ibs512" => "__builtin_ia32_vcvtps2ibs512_mask",
+    "llvm.x86.avx10.mask.vcvtps2iubs128" => "__builtin_ia32_vcvtps2iubs128_mask",
+    "llvm.x86.avx10.mask.vcvtps2iubs256" => "__builtin_ia32_vcvtps2iubs256_mask",
+    "llvm.x86.avx10.mask.vcvtps2iubs512" => "__builtin_ia32_vcvtps2iubs512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2pd256" => "__builtin_ia32_vcvtps2pd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2ph256" => "__builtin_ia32_vcvtps2ph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2phx256" => "__builtin_ia32_vcvtps2phx256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2qq256" => "__builtin_ia32_vcvtps2qq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2udq256" => "__builtin_ia32_vcvtps2udq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvtps2uqq256" => "__builtin_ia32_vcvtps2uqq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2dq256" => "__builtin_ia32_vcvttpd2dq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2qq256" => "__builtin_ia32_vcvttpd2qq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2udq256" => "__builtin_ia32_vcvttpd2udq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttpd2uqq256" => "__builtin_ia32_vcvttpd2uqq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2dq256" => "__builtin_ia32_vcvttph2dq256_round_mask",
+    "llvm.x86.avx10.mask.vcvttph2ibs128" => "__builtin_ia32_vcvttph2ibs128_mask",
+    "llvm.x86.avx10.mask.vcvttph2ibs256" => "__builtin_ia32_vcvttph2ibs256_mask",
+    "llvm.x86.avx10.mask.vcvttph2ibs512" => "__builtin_ia32_vcvttph2ibs512_mask",
+    "llvm.x86.avx10.mask.vcvttph2iubs128" => "__builtin_ia32_vcvttph2iubs128_mask",
+    "llvm.x86.avx10.mask.vcvttph2iubs256" => "__builtin_ia32_vcvttph2iubs256_mask",
+    "llvm.x86.avx10.mask.vcvttph2iubs512" => "__builtin_ia32_vcvttph2iubs512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2qq256" => "__builtin_ia32_vcvttph2qq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2udq256" => "__builtin_ia32_vcvttph2udq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2uqq256" => "__builtin_ia32_vcvttph2uqq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2uw256" => "__builtin_ia32_vcvttph2uw256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttph2w256" => "__builtin_ia32_vcvttph2w256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2dq256" => "__builtin_ia32_vcvttps2dq256_round_mask",
+    "llvm.x86.avx10.mask.vcvttps2ibs128" => "__builtin_ia32_vcvttps2ibs128_mask",
+    "llvm.x86.avx10.mask.vcvttps2ibs256" => "__builtin_ia32_vcvttps2ibs256_mask",
+    "llvm.x86.avx10.mask.vcvttps2ibs512" => "__builtin_ia32_vcvttps2ibs512_mask",
+    "llvm.x86.avx10.mask.vcvttps2iubs128" => "__builtin_ia32_vcvttps2iubs128_mask",
+    "llvm.x86.avx10.mask.vcvttps2iubs256" => "__builtin_ia32_vcvttps2iubs256_mask",
+    "llvm.x86.avx10.mask.vcvttps2iubs512" => "__builtin_ia32_vcvttps2iubs512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2qq256" => "__builtin_ia32_vcvttps2qq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2udq256" => "__builtin_ia32_vcvttps2udq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vcvttps2uqq256" => "__builtin_ia32_vcvttps2uqq256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfcmaddcph256" => "__builtin_ia32_vfcmaddcph256_round_mask3",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfcmulcph256" => "__builtin_ia32_vfcmulcph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfixupimmpd256" => "__builtin_ia32_vfixupimmpd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfixupimmps256" => "__builtin_ia32_vfixupimmps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfmaddcph256" => "__builtin_ia32_vfmaddcph256_round_mask3",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vfmulcph256" => "__builtin_ia32_vfmulcph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetexppd256" => "__builtin_ia32_vgetexppd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetexpph256" => "__builtin_ia32_vgetexpph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetexpps256" => "__builtin_ia32_vgetexpps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetmantpd256" => "__builtin_ia32_vgetmantpd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetmantph256" => "__builtin_ia32_vgetmantph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vgetmantps256" => "__builtin_ia32_vgetmantps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxpd.round" => "__builtin_ia32_vminmaxpd512_round_mask",
+    "llvm.x86.avx10.mask.vminmaxpd128" => "__builtin_ia32_vminmaxpd128_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxpd256.round" => "__builtin_ia32_vminmaxpd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxph.round" => "__builtin_ia32_vminmaxph512_round_mask",
+    "llvm.x86.avx10.mask.vminmaxph128" => "__builtin_ia32_vminmaxph128_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxph256.round" => "__builtin_ia32_vminmaxph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxps.round" => "__builtin_ia32_vminmaxps512_round_mask",
+    "llvm.x86.avx10.mask.vminmaxps128" => "__builtin_ia32_vminmaxps128_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxps256.round" => "__builtin_ia32_vminmaxps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxsd.round" => "__builtin_ia32_vminmaxsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxsh.round" => "__builtin_ia32_vminmaxsh_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vminmaxss.round" => "__builtin_ia32_vminmaxss_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrangepd256" => "__builtin_ia32_vrangepd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrangeps256" => "__builtin_ia32_vrangeps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vreducepd256" => "__builtin_ia32_vreducepd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vreduceph256" => "__builtin_ia32_vreduceph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vreduceps256" => "__builtin_ia32_vreduceps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrndscalepd256" => "__builtin_ia32_vrndscalepd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrndscaleph256" => "__builtin_ia32_vrndscaleph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vrndscaleps256" => "__builtin_ia32_vrndscaleps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vscalefpd256" => "__builtin_ia32_vscalefpd256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vscalefph256" => "__builtin_ia32_vscalefph256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.mask.vscalefps256" => "__builtin_ia32_vscalefps256_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfcmaddcph256" => "__builtin_ia32_vfcmaddcph256_round_maskz",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfixupimmpd256" => "__builtin_ia32_vfixupimmpd256_round_maskz",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfixupimmps256" => "__builtin_ia32_vfixupimmps256_round_maskz",
+    // [INVALID CONVERSION]: "llvm.x86.avx10.maskz.vfmaddcph256" => "__builtin_ia32_vfmaddcph256_round_maskz",
+    "llvm.x86.avx10.vaddpd256" => "__builtin_ia32_vaddpd256_round",
+    "llvm.x86.avx10.vaddph256" => "__builtin_ia32_vaddph256_round",
+    "llvm.x86.avx10.vaddps256" => "__builtin_ia32_vaddps256_round",
+    "llvm.x86.avx10.vcvtne2ph2bf8128" => "__builtin_ia32_vcvtne2ph2bf8_128",
+    "llvm.x86.avx10.vcvtne2ph2bf8256" => "__builtin_ia32_vcvtne2ph2bf8_256",
+    "llvm.x86.avx10.vcvtne2ph2bf8512" => "__builtin_ia32_vcvtne2ph2bf8_512",
+    "llvm.x86.avx10.vcvtne2ph2bf8s128" => "__builtin_ia32_vcvtne2ph2bf8s_128",
+    "llvm.x86.avx10.vcvtne2ph2bf8s256" => "__builtin_ia32_vcvtne2ph2bf8s_256",
+    "llvm.x86.avx10.vcvtne2ph2bf8s512" => "__builtin_ia32_vcvtne2ph2bf8s_512",
+    "llvm.x86.avx10.vcvtne2ph2hf8128" => "__builtin_ia32_vcvtne2ph2hf8_128",
+    "llvm.x86.avx10.vcvtne2ph2hf8256" => "__builtin_ia32_vcvtne2ph2hf8_256",
+    "llvm.x86.avx10.vcvtne2ph2hf8512" => "__builtin_ia32_vcvtne2ph2hf8_512",
+    "llvm.x86.avx10.vcvtne2ph2hf8s128" => "__builtin_ia32_vcvtne2ph2hf8s_128",
+    "llvm.x86.avx10.vcvtne2ph2hf8s256" => "__builtin_ia32_vcvtne2ph2hf8s_256",
+    "llvm.x86.avx10.vcvtne2ph2hf8s512" => "__builtin_ia32_vcvtne2ph2hf8s_512",
+    "llvm.x86.avx10.vcvtnebf162ibs128" => "__builtin_ia32_vcvtnebf162ibs128",
+    "llvm.x86.avx10.vcvtnebf162ibs256" => "__builtin_ia32_vcvtnebf162ibs256",
+    "llvm.x86.avx10.vcvtnebf162ibs512" => "__builtin_ia32_vcvtnebf162ibs512",
+    "llvm.x86.avx10.vcvtnebf162iubs128" => "__builtin_ia32_vcvtnebf162iubs128",
+    "llvm.x86.avx10.vcvtnebf162iubs256" => "__builtin_ia32_vcvtnebf162iubs256",
+    "llvm.x86.avx10.vcvtnebf162iubs512" => "__builtin_ia32_vcvtnebf162iubs512",
+    "llvm.x86.avx10.vcvttnebf162ibs128" => "__builtin_ia32_vcvttnebf162ibs128",
+    "llvm.x86.avx10.vcvttnebf162ibs256" => "__builtin_ia32_vcvttnebf162ibs256",
+    "llvm.x86.avx10.vcvttnebf162ibs512" => "__builtin_ia32_vcvttnebf162ibs512",
+    "llvm.x86.avx10.vcvttnebf162iubs128" => "__builtin_ia32_vcvttnebf162iubs128",
+    "llvm.x86.avx10.vcvttnebf162iubs256" => "__builtin_ia32_vcvttnebf162iubs256",
+    "llvm.x86.avx10.vcvttnebf162iubs512" => "__builtin_ia32_vcvttnebf162iubs512",
+    "llvm.x86.avx10.vdivpd256" => "__builtin_ia32_vdivpd256_round",
+    "llvm.x86.avx10.vdivph256" => "__builtin_ia32_vdivph256_round",
+    "llvm.x86.avx10.vdivps256" => "__builtin_ia32_vdivps256_round",
+    "llvm.x86.avx10.vdpphps.128" => "__builtin_ia32_vdpphps128",
+    "llvm.x86.avx10.vdpphps.256" => "__builtin_ia32_vdpphps256",
+    "llvm.x86.avx10.vdpphps.512" => "__builtin_ia32_vdpphps512",
+    "llvm.x86.avx10.vfmaddsubpd256" => "__builtin_ia32_vfmaddsubpd256_round",
+    "llvm.x86.avx10.vfmaddsubph256" => "__builtin_ia32_vfmaddsubph256_round",
+    "llvm.x86.avx10.vfmaddsubps256" => "__builtin_ia32_vfmaddsubps256_round",
+    "llvm.x86.avx10.vmaxpd256" => "__builtin_ia32_vmaxpd256_round",
+    "llvm.x86.avx10.vmaxph256" => "__builtin_ia32_vmaxph256_round",
+    "llvm.x86.avx10.vmaxps256" => "__builtin_ia32_vmaxps256_round",
+    "llvm.x86.avx10.vminmaxnepbf16128" => "__builtin_ia32_vminmaxnepbf16128",
+    "llvm.x86.avx10.vminmaxnepbf16256" => "__builtin_ia32_vminmaxnepbf16256",
+    "llvm.x86.avx10.vminmaxnepbf16512" => "__builtin_ia32_vminmaxnepbf16512",
+    "llvm.x86.avx10.vminmaxpd128" => "__builtin_ia32_vminmaxpd128",
+    "llvm.x86.avx10.vminmaxpd256" => "__builtin_ia32_vminmaxpd256",
+    "llvm.x86.avx10.vminmaxph128" => "__builtin_ia32_vminmaxph128",
+    "llvm.x86.avx10.vminmaxph256" => "__builtin_ia32_vminmaxph256",
+    "llvm.x86.avx10.vminmaxps128" => "__builtin_ia32_vminmaxps128",
+    "llvm.x86.avx10.vminmaxps256" => "__builtin_ia32_vminmaxps256",
+    "llvm.x86.avx10.vminpd256" => "__builtin_ia32_vminpd256_round",
+    "llvm.x86.avx10.vminph256" => "__builtin_ia32_vminph256_round",
+    "llvm.x86.avx10.vminps256" => "__builtin_ia32_vminps256_round",
+    "llvm.x86.avx10.vmpsadbw.512" => "__builtin_ia32_mpsadbw512",
+    "llvm.x86.avx10.vmulpd256" => "__builtin_ia32_vmulpd256_round",
+    "llvm.x86.avx10.vmulph256" => "__builtin_ia32_vmulph256_round",
+    "llvm.x86.avx10.vmulps256" => "__builtin_ia32_vmulps256_round",
+    "llvm.x86.avx10.vpdpbssd.512" => "__builtin_ia32_vpdpbssd512",
+    "llvm.x86.avx10.vpdpbssds.512" => "__builtin_ia32_vpdpbssds512",
+    "llvm.x86.avx10.vpdpbsud.512" => "__builtin_ia32_vpdpbsud512",
+    "llvm.x86.avx10.vpdpbsuds.512" => "__builtin_ia32_vpdpbsuds512",
+    "llvm.x86.avx10.vpdpbuud.512" => "__builtin_ia32_vpdpbuud512",
+    "llvm.x86.avx10.vpdpbuuds.512" => "__builtin_ia32_vpdpbuuds512",
+    "llvm.x86.avx10.vpdpwsud.512" => "__builtin_ia32_vpdpwsud512",
+    "llvm.x86.avx10.vpdpwsuds.512" => "__builtin_ia32_vpdpwsuds512",
+    "llvm.x86.avx10.vpdpwusd.512" => "__builtin_ia32_vpdpwusd512",
+    "llvm.x86.avx10.vpdpwusds.512" => "__builtin_ia32_vpdpwusds512",
+    "llvm.x86.avx10.vpdpwuud.512" => "__builtin_ia32_vpdpwuud512",
+    "llvm.x86.avx10.vpdpwuuds.512" => "__builtin_ia32_vpdpwuuds512",
+    "llvm.x86.avx10.vsqrtpd256" => "__builtin_ia32_vsqrtpd256_round",
+    "llvm.x86.avx10.vsqrtph256" => "__builtin_ia32_vsqrtph256_round",
+    "llvm.x86.avx10.vsqrtps256" => "__builtin_ia32_vsqrtps256_round",
+    "llvm.x86.avx10.vsubpd256" => "__builtin_ia32_vsubpd256_round",
+    "llvm.x86.avx10.vsubph256" => "__builtin_ia32_vsubph256_round",
+    "llvm.x86.avx10.vsubps256" => "__builtin_ia32_vsubps256_round",
+    "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gatherd_d",
+    "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gatherd_d256",
+    "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gatherd_pd",
+    "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gatherd_pd256",
+    "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gatherd_ps",
+    "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gatherd_ps256",
+    "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gatherd_q",
+    "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gatherd_q256",
+    "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherq_d",
+    "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherq_d256",
+    "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherq_pd",
+    "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherq_pd256",
+    "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherq_ps",
+    "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherq_ps256",
+    "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherq_q",
+    "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherq_q256",
+    "llvm.x86.avx2.maskload.d" => "__builtin_ia32_maskloadd",
+    "llvm.x86.avx2.maskload.d.256" => "__builtin_ia32_maskloadd256",
+    "llvm.x86.avx2.maskload.q" => "__builtin_ia32_maskloadq",
+    "llvm.x86.avx2.maskload.q.256" => "__builtin_ia32_maskloadq256",
+    "llvm.x86.avx2.maskstore.d" => "__builtin_ia32_maskstored",
+    "llvm.x86.avx2.maskstore.d.256" => "__builtin_ia32_maskstored256",
+    "llvm.x86.avx2.maskstore.q" => "__builtin_ia32_maskstoreq",
+    "llvm.x86.avx2.maskstore.q.256" => "__builtin_ia32_maskstoreq256",
+    "llvm.x86.avx2.movntdqa" => "__builtin_ia32_movntdqa256",
+    "llvm.x86.avx2.mpsadbw" => "__builtin_ia32_mpsadbw256",
+    "llvm.x86.avx2.pabs.b" => "__builtin_ia32_pabsb256",
+    "llvm.x86.avx2.pabs.d" => "__builtin_ia32_pabsd256",
+    "llvm.x86.avx2.pabs.w" => "__builtin_ia32_pabsw256",
+    "llvm.x86.avx2.packssdw" => "__builtin_ia32_packssdw256",
+    "llvm.x86.avx2.packsswb" => "__builtin_ia32_packsswb256",
+    "llvm.x86.avx2.packusdw" => "__builtin_ia32_packusdw256",
+    "llvm.x86.avx2.packuswb" => "__builtin_ia32_packuswb256",
+    "llvm.x86.avx2.padds.b" => "__builtin_ia32_paddsb256",
+    "llvm.x86.avx2.padds.w" => "__builtin_ia32_paddsw256",
+    "llvm.x86.avx2.paddus.b" => "__builtin_ia32_paddusb256",
+    "llvm.x86.avx2.paddus.w" => "__builtin_ia32_paddusw256",
+    "llvm.x86.avx2.pavg.b" => "__builtin_ia32_pavgb256",
+    "llvm.x86.avx2.pavg.w" => "__builtin_ia32_pavgw256",
+    "llvm.x86.avx2.pblendd.128" => "__builtin_ia32_pblendd128",
+    "llvm.x86.avx2.pblendd.256" => "__builtin_ia32_pblendd256",
+    "llvm.x86.avx2.pblendvb" => "__builtin_ia32_pblendvb256",
+    "llvm.x86.avx2.pblendw" => "__builtin_ia32_pblendw256",
+    "llvm.x86.avx2.pbroadcastb.128" => "__builtin_ia32_pbroadcastb128",
+    "llvm.x86.avx2.pbroadcastb.256" => "__builtin_ia32_pbroadcastb256",
+    "llvm.x86.avx2.pbroadcastd.128" => "__builtin_ia32_pbroadcastd128",
+    "llvm.x86.avx2.pbroadcastd.256" => "__builtin_ia32_pbroadcastd256",
+    "llvm.x86.avx2.pbroadcastq.128" => "__builtin_ia32_pbroadcastq128",
+    "llvm.x86.avx2.pbroadcastq.256" => "__builtin_ia32_pbroadcastq256",
+    "llvm.x86.avx2.pbroadcastw.128" => "__builtin_ia32_pbroadcastw128",
+    "llvm.x86.avx2.pbroadcastw.256" => "__builtin_ia32_pbroadcastw256",
+    "llvm.x86.avx2.permd" => "__builtin_ia32_permvarsi256",
+    "llvm.x86.avx2.permps" => "__builtin_ia32_permvarsf256",
+    "llvm.x86.avx2.phadd.d" => "__builtin_ia32_phaddd256",
+    "llvm.x86.avx2.phadd.sw" => "__builtin_ia32_phaddsw256",
+    "llvm.x86.avx2.phadd.w" => "__builtin_ia32_phaddw256",
+    "llvm.x86.avx2.phsub.d" => "__builtin_ia32_phsubd256",
+    "llvm.x86.avx2.phsub.sw" => "__builtin_ia32_phsubsw256",
+    "llvm.x86.avx2.phsub.w" => "__builtin_ia32_phsubw256",
+    "llvm.x86.avx2.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw256",
+    "llvm.x86.avx2.pmadd.wd" => "__builtin_ia32_pmaddwd256",
+    "llvm.x86.avx2.pmaxs.b" => "__builtin_ia32_pmaxsb256",
+    "llvm.x86.avx2.pmaxs.d" => "__builtin_ia32_pmaxsd256",
+    "llvm.x86.avx2.pmaxs.w" => "__builtin_ia32_pmaxsw256",
+    "llvm.x86.avx2.pmaxu.b" => "__builtin_ia32_pmaxub256",
+    "llvm.x86.avx2.pmaxu.d" => "__builtin_ia32_pmaxud256",
+    "llvm.x86.avx2.pmaxu.w" => "__builtin_ia32_pmaxuw256",
+    "llvm.x86.avx2.pmins.b" => "__builtin_ia32_pminsb256",
+    "llvm.x86.avx2.pmins.d" => "__builtin_ia32_pminsd256",
+    "llvm.x86.avx2.pmins.w" => "__builtin_ia32_pminsw256",
+    "llvm.x86.avx2.pminu.b" => "__builtin_ia32_pminub256",
+    "llvm.x86.avx2.pminu.d" => "__builtin_ia32_pminud256",
+    "llvm.x86.avx2.pminu.w" => "__builtin_ia32_pminuw256",
+    "llvm.x86.avx2.pmovmskb" => "__builtin_ia32_pmovmskb256",
+    "llvm.x86.avx2.pmovsxbd" => "__builtin_ia32_pmovsxbd256",
+    "llvm.x86.avx2.pmovsxbq" => "__builtin_ia32_pmovsxbq256",
+    "llvm.x86.avx2.pmovsxbw" => "__builtin_ia32_pmovsxbw256",
+    "llvm.x86.avx2.pmovsxdq" => "__builtin_ia32_pmovsxdq256",
+    "llvm.x86.avx2.pmovsxwd" => "__builtin_ia32_pmovsxwd256",
+    "llvm.x86.avx2.pmovsxwq" => "__builtin_ia32_pmovsxwq256",
+    "llvm.x86.avx2.pmovzxbd" => "__builtin_ia32_pmovzxbd256",
+    "llvm.x86.avx2.pmovzxbq" => "__builtin_ia32_pmovzxbq256",
+    "llvm.x86.avx2.pmovzxbw" => "__builtin_ia32_pmovzxbw256",
+    "llvm.x86.avx2.pmovzxdq" => "__builtin_ia32_pmovzxdq256",
+    "llvm.x86.avx2.pmovzxwd" => "__builtin_ia32_pmovzxwd256",
+    "llvm.x86.avx2.pmovzxwq" => "__builtin_ia32_pmovzxwq256",
+    "llvm.x86.avx2.pmul.dq" => "__builtin_ia32_pmuldq256",
+    "llvm.x86.avx2.pmul.hr.sw" => "__builtin_ia32_pmulhrsw256",
+    "llvm.x86.avx2.pmulh.w" => "__builtin_ia32_pmulhw256",
+    "llvm.x86.avx2.pmulhu.w" => "__builtin_ia32_pmulhuw256",
+    "llvm.x86.avx2.pmulu.dq" => "__builtin_ia32_pmuludq256",
+    "llvm.x86.avx2.psad.bw" => "__builtin_ia32_psadbw256",
+    "llvm.x86.avx2.pshuf.b" => "__builtin_ia32_pshufb256",
+    "llvm.x86.avx2.psign.b" => "__builtin_ia32_psignb256",
+    "llvm.x86.avx2.psign.d" => "__builtin_ia32_psignd256",
+    "llvm.x86.avx2.psign.w" => "__builtin_ia32_psignw256",
+    "llvm.x86.avx2.psll.d" => "__builtin_ia32_pslld256",
+    "llvm.x86.avx2.psll.dq" => "__builtin_ia32_pslldqi256",
+    "llvm.x86.avx2.psll.dq.bs" => "__builtin_ia32_pslldqi256_byteshift",
+    "llvm.x86.avx2.psll.q" => "__builtin_ia32_psllq256",
+    "llvm.x86.avx2.psll.w" => "__builtin_ia32_psllw256",
+    "llvm.x86.avx2.pslli.d" => "__builtin_ia32_pslldi256",
+    "llvm.x86.avx2.pslli.q" => "__builtin_ia32_psllqi256",
+    "llvm.x86.avx2.pslli.w" => "__builtin_ia32_psllwi256",
+    "llvm.x86.avx2.psllv.d" => "__builtin_ia32_psllv4si",
+    "llvm.x86.avx2.psllv.d.256" => "__builtin_ia32_psllv8si",
+    "llvm.x86.avx2.psllv.q" => "__builtin_ia32_psllv2di",
+    "llvm.x86.avx2.psllv.q.256" => "__builtin_ia32_psllv4di",
+    "llvm.x86.avx2.psra.d" => "__builtin_ia32_psrad256",
+    "llvm.x86.avx2.psra.w" => "__builtin_ia32_psraw256",
+    "llvm.x86.avx2.psrai.d" => "__builtin_ia32_psradi256",
+    "llvm.x86.avx2.psrai.w" => "__builtin_ia32_psrawi256",
+    "llvm.x86.avx2.psrav.d" => "__builtin_ia32_psrav4si",
+    "llvm.x86.avx2.psrav.d.256" => "__builtin_ia32_psrav8si",
+    "llvm.x86.avx2.psrl.d" => "__builtin_ia32_psrld256",
+    "llvm.x86.avx2.psrl.dq" => "__builtin_ia32_psrldqi256",
+    "llvm.x86.avx2.psrl.dq.bs" => "__builtin_ia32_psrldqi256_byteshift",
+    "llvm.x86.avx2.psrl.q" => "__builtin_ia32_psrlq256",
+    "llvm.x86.avx2.psrl.w" => "__builtin_ia32_psrlw256",
+    "llvm.x86.avx2.psrli.d" => "__builtin_ia32_psrldi256",
+    "llvm.x86.avx2.psrli.q" => "__builtin_ia32_psrlqi256",
+    "llvm.x86.avx2.psrli.w" => "__builtin_ia32_psrlwi256",
+    "llvm.x86.avx2.psrlv.d" => "__builtin_ia32_psrlv4si",
+    "llvm.x86.avx2.psrlv.d.256" => "__builtin_ia32_psrlv8si",
+    "llvm.x86.avx2.psrlv.q" => "__builtin_ia32_psrlv2di",
+    "llvm.x86.avx2.psrlv.q.256" => "__builtin_ia32_psrlv4di",
+    "llvm.x86.avx2.psubs.b" => "__builtin_ia32_psubsb256",
+    "llvm.x86.avx2.psubs.w" => "__builtin_ia32_psubsw256",
+    "llvm.x86.avx2.psubus.b" => "__builtin_ia32_psubusb256",
+    "llvm.x86.avx2.psubus.w" => "__builtin_ia32_psubusw256",
+    "llvm.x86.avx2.vbroadcast.sd.pd.256" => "__builtin_ia32_vbroadcastsd_pd256",
+    "llvm.x86.avx2.vbroadcast.ss.ps" => "__builtin_ia32_vbroadcastss_ps",
+    "llvm.x86.avx2.vbroadcast.ss.ps.256" => "__builtin_ia32_vbroadcastss_ps256",
+    "llvm.x86.avx2.vextracti128" => "__builtin_ia32_extract128i256",
+    "llvm.x86.avx2.vinserti128" => "__builtin_ia32_insert128i256",
+    "llvm.x86.avx2.vpdpbssd.128" => "__builtin_ia32_vpdpbssd128",
+    "llvm.x86.avx2.vpdpbssd.256" => "__builtin_ia32_vpdpbssd256",
+    "llvm.x86.avx2.vpdpbssds.128" => "__builtin_ia32_vpdpbssds128",
+    "llvm.x86.avx2.vpdpbssds.256" => "__builtin_ia32_vpdpbssds256",
+    "llvm.x86.avx2.vpdpbsud.128" => "__builtin_ia32_vpdpbsud128",
+    "llvm.x86.avx2.vpdpbsud.256" => "__builtin_ia32_vpdpbsud256",
+    "llvm.x86.avx2.vpdpbsuds.128" => "__builtin_ia32_vpdpbsuds128",
+    "llvm.x86.avx2.vpdpbsuds.256" => "__builtin_ia32_vpdpbsuds256",
+    "llvm.x86.avx2.vpdpbuud.128" => "__builtin_ia32_vpdpbuud128",
+    "llvm.x86.avx2.vpdpbuud.256" => "__builtin_ia32_vpdpbuud256",
+    "llvm.x86.avx2.vpdpbuuds.128" => "__builtin_ia32_vpdpbuuds128",
+    "llvm.x86.avx2.vpdpbuuds.256" => "__builtin_ia32_vpdpbuuds256",
+    "llvm.x86.avx2.vpdpwsud.128" => "__builtin_ia32_vpdpwsud128",
+    "llvm.x86.avx2.vpdpwsud.256" => "__builtin_ia32_vpdpwsud256",
+    "llvm.x86.avx2.vpdpwsuds.128" => "__builtin_ia32_vpdpwsuds128",
+    "llvm.x86.avx2.vpdpwsuds.256" => "__builtin_ia32_vpdpwsuds256",
+    "llvm.x86.avx2.vpdpwusd.128" => "__builtin_ia32_vpdpwusd128",
+    "llvm.x86.avx2.vpdpwusd.256" => "__builtin_ia32_vpdpwusd256",
+    "llvm.x86.avx2.vpdpwusds.128" => "__builtin_ia32_vpdpwusds128",
+    "llvm.x86.avx2.vpdpwusds.256" => "__builtin_ia32_vpdpwusds256",
+    "llvm.x86.avx2.vpdpwuud.128" => "__builtin_ia32_vpdpwuud128",
+    "llvm.x86.avx2.vpdpwuud.256" => "__builtin_ia32_vpdpwuud256",
+    "llvm.x86.avx2.vpdpwuuds.128" => "__builtin_ia32_vpdpwuuds128",
+    "llvm.x86.avx2.vpdpwuuds.256" => "__builtin_ia32_vpdpwuuds256",
+    "llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256",
+    "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512",
+    "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512",
+    "llvm.x86.avx512.broadcastmb.128" => "__builtin_ia32_broadcastmb128",
+    "llvm.x86.avx512.broadcastmb.256" => "__builtin_ia32_broadcastmb256",
+    "llvm.x86.avx512.broadcastmb.512" => "__builtin_ia32_broadcastmb512",
+    "llvm.x86.avx512.broadcastmw.128" => "__builtin_ia32_broadcastmw128",
+    "llvm.x86.avx512.broadcastmw.256" => "__builtin_ia32_broadcastmw256",
+    "llvm.x86.avx512.broadcastmw.512" => "__builtin_ia32_broadcastmw512",
+    "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128",
+    "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256",
+    "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512",
+    "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128",
+    "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256",
+    "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512",
+    "llvm.x86.avx512.cvtb2mask.128" => "__builtin_ia32_cvtb2mask128",
+    "llvm.x86.avx512.cvtb2mask.256" => "__builtin_ia32_cvtb2mask256",
+    "llvm.x86.avx512.cvtb2mask.512" => "__builtin_ia32_cvtb2mask512",
+    "llvm.x86.avx512.cvtd2mask.128" => "__builtin_ia32_cvtd2mask128",
+    "llvm.x86.avx512.cvtd2mask.256" => "__builtin_ia32_cvtd2mask256",
+    "llvm.x86.avx512.cvtd2mask.512" => "__builtin_ia32_cvtd2mask512",
+    "llvm.x86.avx512.cvtmask2b.128" => "__builtin_ia32_cvtmask2b128",
+    "llvm.x86.avx512.cvtmask2b.256" => "__builtin_ia32_cvtmask2b256",
+    "llvm.x86.avx512.cvtmask2b.512" => "__builtin_ia32_cvtmask2b512",
+    "llvm.x86.avx512.cvtmask2d.128" => "__builtin_ia32_cvtmask2d128",
+    "llvm.x86.avx512.cvtmask2d.256" => "__builtin_ia32_cvtmask2d256",
+    "llvm.x86.avx512.cvtmask2d.512" => "__builtin_ia32_cvtmask2d512",
+    "llvm.x86.avx512.cvtmask2q.128" => "__builtin_ia32_cvtmask2q128",
+    "llvm.x86.avx512.cvtmask2q.256" => "__builtin_ia32_cvtmask2q256",
+    "llvm.x86.avx512.cvtmask2q.512" => "__builtin_ia32_cvtmask2q512",
+    "llvm.x86.avx512.cvtmask2w.128" => "__builtin_ia32_cvtmask2w128",
+    "llvm.x86.avx512.cvtmask2w.256" => "__builtin_ia32_cvtmask2w256",
+    "llvm.x86.avx512.cvtmask2w.512" => "__builtin_ia32_cvtmask2w512",
+    "llvm.x86.avx512.cvtq2mask.128" => "__builtin_ia32_cvtq2mask128",
+    "llvm.x86.avx512.cvtq2mask.256" => "__builtin_ia32_cvtq2mask256",
+    "llvm.x86.avx512.cvtq2mask.512" => "__builtin_ia32_cvtq2mask512",
+    "llvm.x86.avx512.cvtsd2usi" => "__builtin_ia32_cvtsd2usi",
+    "llvm.x86.avx512.cvtsd2usi64" => "__builtin_ia32_cvtsd2usi64",
+    "llvm.x86.avx512.cvtsi2sd32" => "__builtin_ia32_cvtsi2sd32",
+    "llvm.x86.avx512.cvtsi2sd64" => "__builtin_ia32_cvtsi2sd64",
+    "llvm.x86.avx512.cvtsi2ss32" => "__builtin_ia32_cvtsi2ss32",
+    "llvm.x86.avx512.cvtsi2ss64" => "__builtin_ia32_cvtsi2ss64",
+    "llvm.x86.avx512.cvtss2usi" => "__builtin_ia32_cvtss2usi",
+    "llvm.x86.avx512.cvtss2usi64" => "__builtin_ia32_cvtss2usi64",
+    "llvm.x86.avx512.cvttsd2si" => "__builtin_ia32_vcvttsd2si32",
+    "llvm.x86.avx512.cvttsd2si64" => "__builtin_ia32_vcvttsd2si64",
+    "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_vcvttsd2usi32",
+    // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_cvttsd2usi",
+    "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_vcvttsd2usi64",
+    // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_cvttsd2usi64",
+    "llvm.x86.avx512.cvttss2si" => "__builtin_ia32_vcvttss2si32",
+    "llvm.x86.avx512.cvttss2si64" => "__builtin_ia32_vcvttss2si64",
+    "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_vcvttss2usi32",
+    // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_cvttss2usi",
+    "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_vcvttss2usi64",
+    // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_cvttss2usi64",
+    "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd",
+    // [DUPLICATE]: "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd32",
+    "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss32",
+    // [DUPLICATE]: "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss",
+    "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi2sd64",
+    // [DUPLICATE]: "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi642sd",
+    "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi2ss64",
+    // [DUPLICATE]: "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi642ss",
+    "llvm.x86.avx512.cvtw2mask.128" => "__builtin_ia32_cvtw2mask128",
+    "llvm.x86.avx512.cvtw2mask.256" => "__builtin_ia32_cvtw2mask256",
+    "llvm.x86.avx512.cvtw2mask.512" => "__builtin_ia32_cvtw2mask512",
+    "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128",
+    "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256",
+    "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512",
+    "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512",
+    "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512",
+    "llvm.x86.avx512.exp2.pd" => "__builtin_ia32_exp2pd_mask",
+    "llvm.x86.avx512.exp2.ps" => "__builtin_ia32_exp2ps_mask",
+    "llvm.x86.avx512.gather.dpd.512" => "__builtin_ia32_gathersiv8df",
+    "llvm.x86.avx512.gather.dpi.512" => "__builtin_ia32_gathersiv16si",
+    "llvm.x86.avx512.gather.dpq.512" => "__builtin_ia32_gathersiv8di",
+    "llvm.x86.avx512.gather.dps.512" => "__builtin_ia32_gathersiv16sf",
+    "llvm.x86.avx512.gather.qpd.512" => "__builtin_ia32_gatherdiv8df",
+    "llvm.x86.avx512.gather.qpi.512" => "__builtin_ia32_gatherdiv16si",
+    "llvm.x86.avx512.gather.qpq.512" => "__builtin_ia32_gatherdiv8di",
+    "llvm.x86.avx512.gather.qps.512" => "__builtin_ia32_gatherdiv16sf",
+    "llvm.x86.avx512.gather3div2.df" => "__builtin_ia32_gather3div2df",
+    "llvm.x86.avx512.gather3div2.di" => "__builtin_ia32_gather3div2di",
+    "llvm.x86.avx512.gather3div4.df" => "__builtin_ia32_gather3div4df",
+    "llvm.x86.avx512.gather3div4.di" => "__builtin_ia32_gather3div4di",
+    "llvm.x86.avx512.gather3div4.sf" => "__builtin_ia32_gather3div4sf",
+    "llvm.x86.avx512.gather3div4.si" => "__builtin_ia32_gather3div4si",
+    "llvm.x86.avx512.gather3div8.sf" => "__builtin_ia32_gather3div8sf",
+    "llvm.x86.avx512.gather3div8.si" => "__builtin_ia32_gather3div8si",
+    "llvm.x86.avx512.gather3siv2.df" => "__builtin_ia32_gather3siv2df",
+    "llvm.x86.avx512.gather3siv2.di" => "__builtin_ia32_gather3siv2di",
+    "llvm.x86.avx512.gather3siv4.df" => "__builtin_ia32_gather3siv4df",
+    "llvm.x86.avx512.gather3siv4.di" => "__builtin_ia32_gather3siv4di",
+    "llvm.x86.avx512.gather3siv4.sf" => "__builtin_ia32_gather3siv4sf",
+    "llvm.x86.avx512.gather3siv4.si" => "__builtin_ia32_gather3siv4si",
+    "llvm.x86.avx512.gather3siv8.sf" => "__builtin_ia32_gather3siv8sf",
+    "llvm.x86.avx512.gather3siv8.si" => "__builtin_ia32_gather3siv8si",
+    "llvm.x86.avx512.gatherpf.dpd.512" => "__builtin_ia32_gatherpfdpd",
+    "llvm.x86.avx512.gatherpf.dps.512" => "__builtin_ia32_gatherpfdps",
+    "llvm.x86.avx512.gatherpf.qpd.512" => "__builtin_ia32_gatherpfqpd",
+    "llvm.x86.avx512.gatherpf.qps.512" => "__builtin_ia32_gatherpfqps",
+    "llvm.x86.avx512.kand.w" => "__builtin_ia32_kandhi",
+    "llvm.x86.avx512.kandn.w" => "__builtin_ia32_kandnhi",
+    "llvm.x86.avx512.knot.w" => "__builtin_ia32_knothi",
+    "llvm.x86.avx512.kor.w" => "__builtin_ia32_korhi",
+    "llvm.x86.avx512.kortestc.w" => "__builtin_ia32_kortestchi",
+    "llvm.x86.avx512.kortestz.w" => "__builtin_ia32_kortestzhi",
+    "llvm.x86.avx512.kunpck.bw" => "__builtin_ia32_kunpckhi",
+    "llvm.x86.avx512.kunpck.dq" => "__builtin_ia32_kunpckdi",
+    "llvm.x86.avx512.kunpck.wd" => "__builtin_ia32_kunpcksi",
+    "llvm.x86.avx512.kxnor.w" => "__builtin_ia32_kxnorhi",
+    "llvm.x86.avx512.kxor.w" => "__builtin_ia32_kxorhi",
+    "llvm.x86.avx512.mask.add.pd.128" => "__builtin_ia32_addpd128_mask",
+    "llvm.x86.avx512.mask.add.pd.256" => "__builtin_ia32_addpd256_mask",
+    "llvm.x86.avx512.mask.add.pd.512" => "__builtin_ia32_addpd512_mask",
+    "llvm.x86.avx512.mask.add.ps.128" => "__builtin_ia32_addps128_mask",
+    "llvm.x86.avx512.mask.add.ps.256" => "__builtin_ia32_addps256_mask",
+    "llvm.x86.avx512.mask.add.ps.512" => "__builtin_ia32_addps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask",
+    "llvm.x86.avx512.mask.and.pd.128" => "__builtin_ia32_andpd128_mask",
+    "llvm.x86.avx512.mask.and.pd.256" => "__builtin_ia32_andpd256_mask",
+    "llvm.x86.avx512.mask.and.pd.512" => "__builtin_ia32_andpd512_mask",
+    "llvm.x86.avx512.mask.and.ps.128" => "__builtin_ia32_andps128_mask",
+    "llvm.x86.avx512.mask.and.ps.256" => "__builtin_ia32_andps256_mask",
+    "llvm.x86.avx512.mask.and.ps.512" => "__builtin_ia32_andps512_mask",
+    "llvm.x86.avx512.mask.andn.pd.128" => "__builtin_ia32_andnpd128_mask",
+    "llvm.x86.avx512.mask.andn.pd.256" => "__builtin_ia32_andnpd256_mask",
+    "llvm.x86.avx512.mask.andn.pd.512" => "__builtin_ia32_andnpd512_mask",
+    "llvm.x86.avx512.mask.andn.ps.128" => "__builtin_ia32_andnps128_mask",
+    "llvm.x86.avx512.mask.andn.ps.256" => "__builtin_ia32_andnps256_mask",
+    "llvm.x86.avx512.mask.andn.ps.512" => "__builtin_ia32_andnps512_mask",
+    "llvm.x86.avx512.mask.blend.d.512" => "__builtin_ia32_blendmd_512_mask",
+    "llvm.x86.avx512.mask.blend.pd.512" => "__builtin_ia32_blendmpd_512_mask",
+    "llvm.x86.avx512.mask.blend.ps.512" => "__builtin_ia32_blendmps_512_mask",
+    "llvm.x86.avx512.mask.blend.q.512" => "__builtin_ia32_blendmq_512_mask",
+    "llvm.x86.avx512.mask.broadcastf32x2.256" => "__builtin_ia32_broadcastf32x2_256_mask",
+    "llvm.x86.avx512.mask.broadcastf32x2.512" => "__builtin_ia32_broadcastf32x2_512_mask",
+    "llvm.x86.avx512.mask.broadcastf32x4.256" => "__builtin_ia32_broadcastf32x4_256_mask",
+    "llvm.x86.avx512.mask.broadcastf32x4.512" => "__builtin_ia32_broadcastf32x4_512",
+    "llvm.x86.avx512.mask.broadcastf32x8.512" => "__builtin_ia32_broadcastf32x8_512_mask",
+    "llvm.x86.avx512.mask.broadcastf64x2.256" => "__builtin_ia32_broadcastf64x2_256_mask",
+    "llvm.x86.avx512.mask.broadcastf64x2.512" => "__builtin_ia32_broadcastf64x2_512_mask",
+    "llvm.x86.avx512.mask.broadcastf64x4.512" => "__builtin_ia32_broadcastf64x4_512",
+    "llvm.x86.avx512.mask.broadcasti32x2.128" => "__builtin_ia32_broadcasti32x2_128_mask",
+    "llvm.x86.avx512.mask.broadcasti32x2.256" => "__builtin_ia32_broadcasti32x2_256_mask",
+    "llvm.x86.avx512.mask.broadcasti32x2.512" => "__builtin_ia32_broadcasti32x2_512_mask",
+    "llvm.x86.avx512.mask.broadcasti32x4.256" => "__builtin_ia32_broadcasti32x4_256_mask",
+    "llvm.x86.avx512.mask.broadcasti32x4.512" => "__builtin_ia32_broadcasti32x4_512",
+    "llvm.x86.avx512.mask.broadcasti32x8.512" => "__builtin_ia32_broadcasti32x8_512_mask",
+    "llvm.x86.avx512.mask.broadcasti64x2.256" => "__builtin_ia32_broadcasti64x2_256_mask",
+    "llvm.x86.avx512.mask.broadcasti64x2.512" => "__builtin_ia32_broadcasti64x2_512_mask",
+    "llvm.x86.avx512.mask.broadcasti64x4.512" => "__builtin_ia32_broadcasti64x4_512",
+    "llvm.x86.avx512.mask.cmp.pd.128" => "__builtin_ia32_cmppd128_mask",
+    "llvm.x86.avx512.mask.cmp.pd.256" => "__builtin_ia32_cmppd256_mask",
+    "llvm.x86.avx512.mask.cmp.pd.512" => "__builtin_ia32_cmppd512_mask",
+    "llvm.x86.avx512.mask.cmp.ps.128" => "__builtin_ia32_cmpps128_mask",
+    "llvm.x86.avx512.mask.cmp.ps.256" => "__builtin_ia32_cmpps256_mask",
+    "llvm.x86.avx512.mask.cmp.ps.512" => "__builtin_ia32_cmpps512_mask",
+    "llvm.x86.avx512.mask.cmp.sd" => "__builtin_ia32_cmpsd_mask",
+    "llvm.x86.avx512.mask.cmp.ss" => "__builtin_ia32_cmpss_mask",
+    "llvm.x86.avx512.mask.compress.d.128" => "__builtin_ia32_compresssi128_mask",
+    "llvm.x86.avx512.mask.compress.d.256" => "__builtin_ia32_compresssi256_mask",
+    "llvm.x86.avx512.mask.compress.d.512" => "__builtin_ia32_compresssi512_mask",
+    "llvm.x86.avx512.mask.compress.pd.128" => "__builtin_ia32_compressdf128_mask",
+    "llvm.x86.avx512.mask.compress.pd.256" => "__builtin_ia32_compressdf256_mask",
+    "llvm.x86.avx512.mask.compress.pd.512" => "__builtin_ia32_compressdf512_mask",
+    "llvm.x86.avx512.mask.compress.ps.128" => "__builtin_ia32_compresssf128_mask",
+    "llvm.x86.avx512.mask.compress.ps.256" => "__builtin_ia32_compresssf256_mask",
+    "llvm.x86.avx512.mask.compress.ps.512" => "__builtin_ia32_compresssf512_mask",
+    "llvm.x86.avx512.mask.compress.q.128" => "__builtin_ia32_compressdi128_mask",
+    "llvm.x86.avx512.mask.compress.q.256" => "__builtin_ia32_compressdi256_mask",
+    "llvm.x86.avx512.mask.compress.q.512" => "__builtin_ia32_compressdi512_mask",
+    "llvm.x86.avx512.mask.compress.store.d.128" => "__builtin_ia32_compressstoresi128_mask",
+    "llvm.x86.avx512.mask.compress.store.d.256" => "__builtin_ia32_compressstoresi256_mask",
+    "llvm.x86.avx512.mask.compress.store.d.512" => "__builtin_ia32_compressstoresi512_mask",
+    "llvm.x86.avx512.mask.compress.store.pd.128" => "__builtin_ia32_compressstoredf128_mask",
+    "llvm.x86.avx512.mask.compress.store.pd.256" => "__builtin_ia32_compressstoredf256_mask",
+    "llvm.x86.avx512.mask.compress.store.pd.512" => "__builtin_ia32_compressstoredf512_mask",
+    "llvm.x86.avx512.mask.compress.store.ps.128" => "__builtin_ia32_compressstoresf128_mask",
+    "llvm.x86.avx512.mask.compress.store.ps.256" => "__builtin_ia32_compressstoresf256_mask",
+    "llvm.x86.avx512.mask.compress.store.ps.512" => "__builtin_ia32_compressstoresf512_mask",
+    "llvm.x86.avx512.mask.compress.store.q.128" => "__builtin_ia32_compressstoredi128_mask",
+    "llvm.x86.avx512.mask.compress.store.q.256" => "__builtin_ia32_compressstoredi256_mask",
+    "llvm.x86.avx512.mask.compress.store.q.512" => "__builtin_ia32_compressstoredi512_mask",
+    "llvm.x86.avx512.mask.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask",
+    "llvm.x86.avx512.mask.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask",
+    "llvm.x86.avx512.mask.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask",
+    "llvm.x86.avx512.mask.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask",
+    "llvm.x86.avx512.mask.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask",
+    "llvm.x86.avx512.mask.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask",
+    "llvm.x86.avx512.mask.cvtdq2pd.128" => "__builtin_ia32_cvtdq2pd128_mask",
+    "llvm.x86.avx512.mask.cvtdq2pd.256" => "__builtin_ia32_cvtdq2pd256_mask",
+    "llvm.x86.avx512.mask.cvtdq2pd.512" => "__builtin_ia32_cvtdq2pd512_mask",
+    "llvm.x86.avx512.mask.cvtdq2ps.128" => "__builtin_ia32_cvtdq2ps128_mask",
+    "llvm.x86.avx512.mask.cvtdq2ps.256" => "__builtin_ia32_cvtdq2ps256_mask",
+    "llvm.x86.avx512.mask.cvtdq2ps.512" => "__builtin_ia32_cvtdq2ps512_mask",
+    "llvm.x86.avx512.mask.cvtpd2dq.128" => "__builtin_ia32_cvtpd2dq128_mask",
+    "llvm.x86.avx512.mask.cvtpd2dq.256" => "__builtin_ia32_cvtpd2dq256_mask",
+    "llvm.x86.avx512.mask.cvtpd2dq.512" => "__builtin_ia32_cvtpd2dq512_mask",
+    "llvm.x86.avx512.mask.cvtpd2ps" => "__builtin_ia32_cvtpd2ps_mask",
+    "llvm.x86.avx512.mask.cvtpd2ps.256" => "__builtin_ia32_cvtpd2ps256_mask",
+    "llvm.x86.avx512.mask.cvtpd2ps.512" => "__builtin_ia32_cvtpd2ps512_mask",
+    "llvm.x86.avx512.mask.cvtpd2qq.128" => "__builtin_ia32_cvtpd2qq128_mask",
+    "llvm.x86.avx512.mask.cvtpd2qq.256" => "__builtin_ia32_cvtpd2qq256_mask",
+    "llvm.x86.avx512.mask.cvtpd2qq.512" => "__builtin_ia32_cvtpd2qq512_mask",
+    "llvm.x86.avx512.mask.cvtpd2udq.128" => "__builtin_ia32_cvtpd2udq128_mask",
+    "llvm.x86.avx512.mask.cvtpd2udq.256" => "__builtin_ia32_cvtpd2udq256_mask",
+    "llvm.x86.avx512.mask.cvtpd2udq.512" => "__builtin_ia32_cvtpd2udq512_mask",
+    "llvm.x86.avx512.mask.cvtpd2uqq.128" => "__builtin_ia32_cvtpd2uqq128_mask",
+    "llvm.x86.avx512.mask.cvtpd2uqq.256" => "__builtin_ia32_cvtpd2uqq256_mask",
+    "llvm.x86.avx512.mask.cvtpd2uqq.512" => "__builtin_ia32_cvtpd2uqq512_mask",
+    "llvm.x86.avx512.mask.cvtps2dq.128" => "__builtin_ia32_cvtps2dq128_mask",
+    "llvm.x86.avx512.mask.cvtps2dq.256" => "__builtin_ia32_cvtps2dq256_mask",
+    "llvm.x86.avx512.mask.cvtps2dq.512" => "__builtin_ia32_cvtps2dq512_mask",
+    "llvm.x86.avx512.mask.cvtps2pd.128" => "__builtin_ia32_cvtps2pd128_mask",
+    "llvm.x86.avx512.mask.cvtps2pd.256" => "__builtin_ia32_cvtps2pd256_mask",
+    "llvm.x86.avx512.mask.cvtps2pd.512" => "__builtin_ia32_cvtps2pd512_mask",
+    "llvm.x86.avx512.mask.cvtps2qq.128" => "__builtin_ia32_cvtps2qq128_mask",
+    "llvm.x86.avx512.mask.cvtps2qq.256" => "__builtin_ia32_cvtps2qq256_mask",
+    "llvm.x86.avx512.mask.cvtps2qq.512" => "__builtin_ia32_cvtps2qq512_mask",
+    "llvm.x86.avx512.mask.cvtps2udq.128" => "__builtin_ia32_cvtps2udq128_mask",
+    "llvm.x86.avx512.mask.cvtps2udq.256" => "__builtin_ia32_cvtps2udq256_mask",
+    "llvm.x86.avx512.mask.cvtps2udq.512" => "__builtin_ia32_cvtps2udq512_mask",
+    "llvm.x86.avx512.mask.cvtps2uqq.128" => "__builtin_ia32_cvtps2uqq128_mask",
+    "llvm.x86.avx512.mask.cvtps2uqq.256" => "__builtin_ia32_cvtps2uqq256_mask",
+    "llvm.x86.avx512.mask.cvtps2uqq.512" => "__builtin_ia32_cvtps2uqq512_mask",
+    "llvm.x86.avx512.mask.cvtqq2pd.128" => "__builtin_ia32_cvtqq2pd128_mask",
+    "llvm.x86.avx512.mask.cvtqq2pd.256" => "__builtin_ia32_cvtqq2pd256_mask",
+    "llvm.x86.avx512.mask.cvtqq2pd.512" => "__builtin_ia32_cvtqq2pd512_mask",
+    "llvm.x86.avx512.mask.cvtqq2ps.128" => "__builtin_ia32_cvtqq2ps128_mask",
+    "llvm.x86.avx512.mask.cvtqq2ps.256" => "__builtin_ia32_cvtqq2ps256_mask",
+    "llvm.x86.avx512.mask.cvtqq2ps.512" => "__builtin_ia32_cvtqq2ps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask",
+    "llvm.x86.avx512.mask.cvttpd2dq.128" => "__builtin_ia32_cvttpd2dq128_mask",
+    "llvm.x86.avx512.mask.cvttpd2dq.256" => "__builtin_ia32_cvttpd2dq256_mask",
+    "llvm.x86.avx512.mask.cvttpd2dq.512" => "__builtin_ia32_cvttpd2dq512_mask",
+    "llvm.x86.avx512.mask.cvttpd2qq.128" => "__builtin_ia32_cvttpd2qq128_mask",
+    "llvm.x86.avx512.mask.cvttpd2qq.256" => "__builtin_ia32_cvttpd2qq256_mask",
+    "llvm.x86.avx512.mask.cvttpd2qq.512" => "__builtin_ia32_cvttpd2qq512_mask",
+    "llvm.x86.avx512.mask.cvttpd2udq.128" => "__builtin_ia32_cvttpd2udq128_mask",
+    "llvm.x86.avx512.mask.cvttpd2udq.256" => "__builtin_ia32_cvttpd2udq256_mask",
+    "llvm.x86.avx512.mask.cvttpd2udq.512" => "__builtin_ia32_cvttpd2udq512_mask",
+    "llvm.x86.avx512.mask.cvttpd2uqq.128" => "__builtin_ia32_cvttpd2uqq128_mask",
+    "llvm.x86.avx512.mask.cvttpd2uqq.256" => "__builtin_ia32_cvttpd2uqq256_mask",
+    "llvm.x86.avx512.mask.cvttpd2uqq.512" => "__builtin_ia32_cvttpd2uqq512_mask",
+    "llvm.x86.avx512.mask.cvttps2dq.128" => "__builtin_ia32_cvttps2dq128_mask",
+    "llvm.x86.avx512.mask.cvttps2dq.256" => "__builtin_ia32_cvttps2dq256_mask",
+    "llvm.x86.avx512.mask.cvttps2dq.512" => "__builtin_ia32_cvttps2dq512_mask",
+    "llvm.x86.avx512.mask.cvttps2qq.128" => "__builtin_ia32_cvttps2qq128_mask",
+    "llvm.x86.avx512.mask.cvttps2qq.256" => "__builtin_ia32_cvttps2qq256_mask",
+    "llvm.x86.avx512.mask.cvttps2qq.512" => "__builtin_ia32_cvttps2qq512_mask",
+    "llvm.x86.avx512.mask.cvttps2udq.128" => "__builtin_ia32_cvttps2udq128_mask",
+    "llvm.x86.avx512.mask.cvttps2udq.256" => "__builtin_ia32_cvttps2udq256_mask",
+    "llvm.x86.avx512.mask.cvttps2udq.512" => "__builtin_ia32_cvttps2udq512_mask",
+    "llvm.x86.avx512.mask.cvttps2uqq.128" => "__builtin_ia32_cvttps2uqq128_mask",
+    "llvm.x86.avx512.mask.cvttps2uqq.256" => "__builtin_ia32_cvttps2uqq256_mask",
+    "llvm.x86.avx512.mask.cvttps2uqq.512" => "__builtin_ia32_cvttps2uqq512_mask",
+    "llvm.x86.avx512.mask.cvtudq2pd.128" => "__builtin_ia32_cvtudq2pd128_mask",
+    "llvm.x86.avx512.mask.cvtudq2pd.256" => "__builtin_ia32_cvtudq2pd256_mask",
+    "llvm.x86.avx512.mask.cvtudq2pd.512" => "__builtin_ia32_cvtudq2pd512_mask",
+    "llvm.x86.avx512.mask.cvtudq2ps.128" => "__builtin_ia32_cvtudq2ps128_mask",
+    "llvm.x86.avx512.mask.cvtudq2ps.256" => "__builtin_ia32_cvtudq2ps256_mask",
+    "llvm.x86.avx512.mask.cvtudq2ps.512" => "__builtin_ia32_cvtudq2ps512_mask",
+    "llvm.x86.avx512.mask.cvtuqq2pd.128" => "__builtin_ia32_cvtuqq2pd128_mask",
+    "llvm.x86.avx512.mask.cvtuqq2pd.256" => "__builtin_ia32_cvtuqq2pd256_mask",
+    "llvm.x86.avx512.mask.cvtuqq2pd.512" => "__builtin_ia32_cvtuqq2pd512_mask",
+    "llvm.x86.avx512.mask.cvtuqq2ps.128" => "__builtin_ia32_cvtuqq2ps128_mask",
+    "llvm.x86.avx512.mask.cvtuqq2ps.256" => "__builtin_ia32_cvtuqq2ps256_mask",
+    "llvm.x86.avx512.mask.cvtuqq2ps.512" => "__builtin_ia32_cvtuqq2ps512_mask",
+    "llvm.x86.avx512.mask.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask",
+    "llvm.x86.avx512.mask.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask",
+    "llvm.x86.avx512.mask.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask",
+    "llvm.x86.avx512.mask.div.pd.128" => "__builtin_ia32_divpd_mask",
+    "llvm.x86.avx512.mask.div.pd.256" => "__builtin_ia32_divpd256_mask",
+    "llvm.x86.avx512.mask.div.pd.512" => "__builtin_ia32_divpd512_mask",
+    "llvm.x86.avx512.mask.div.ps.128" => "__builtin_ia32_divps_mask",
+    "llvm.x86.avx512.mask.div.ps.256" => "__builtin_ia32_divps256_mask",
+    "llvm.x86.avx512.mask.div.ps.512" => "__builtin_ia32_divps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask",
+    "llvm.x86.avx512.mask.expand.d.128" => "__builtin_ia32_expandsi128_mask",
+    "llvm.x86.avx512.mask.expand.d.256" => "__builtin_ia32_expandsi256_mask",
+    "llvm.x86.avx512.mask.expand.d.512" => "__builtin_ia32_expandsi512_mask",
+    "llvm.x86.avx512.mask.expand.load.d.128" => "__builtin_ia32_expandloadsi128_mask",
+    "llvm.x86.avx512.mask.expand.load.d.256" => "__builtin_ia32_expandloadsi256_mask",
+    "llvm.x86.avx512.mask.expand.load.d.512" => "__builtin_ia32_expandloadsi512_mask",
+    "llvm.x86.avx512.mask.expand.load.pd.128" => "__builtin_ia32_expandloaddf128_mask",
+    "llvm.x86.avx512.mask.expand.load.pd.256" => "__builtin_ia32_expandloaddf256_mask",
+    "llvm.x86.avx512.mask.expand.load.pd.512" => "__builtin_ia32_expandloaddf512_mask",
+    "llvm.x86.avx512.mask.expand.load.ps.128" => "__builtin_ia32_expandloadsf128_mask",
+    "llvm.x86.avx512.mask.expand.load.ps.256" => "__builtin_ia32_expandloadsf256_mask",
+    "llvm.x86.avx512.mask.expand.load.ps.512" => "__builtin_ia32_expandloadsf512_mask",
+    "llvm.x86.avx512.mask.expand.load.q.128" => "__builtin_ia32_expandloaddi128_mask",
+    "llvm.x86.avx512.mask.expand.load.q.256" => "__builtin_ia32_expandloaddi256_mask",
+    "llvm.x86.avx512.mask.expand.load.q.512" => "__builtin_ia32_expandloaddi512_mask",
+    "llvm.x86.avx512.mask.expand.pd.128" => "__builtin_ia32_expanddf128_mask",
+    "llvm.x86.avx512.mask.expand.pd.256" => "__builtin_ia32_expanddf256_mask",
+    "llvm.x86.avx512.mask.expand.pd.512" => "__builtin_ia32_expanddf512_mask",
+    "llvm.x86.avx512.mask.expand.ps.128" => "__builtin_ia32_expandsf128_mask",
+    "llvm.x86.avx512.mask.expand.ps.256" => "__builtin_ia32_expandsf256_mask",
+    "llvm.x86.avx512.mask.expand.ps.512" => "__builtin_ia32_expandsf512_mask",
+    "llvm.x86.avx512.mask.expand.q.128" => "__builtin_ia32_expanddi128_mask",
+    "llvm.x86.avx512.mask.expand.q.256" => "__builtin_ia32_expanddi256_mask",
+    "llvm.x86.avx512.mask.expand.q.512" => "__builtin_ia32_expanddi512_mask",
+    "llvm.x86.avx512.mask.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_mask",
+    "llvm.x86.avx512.mask.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_mask",
+    "llvm.x86.avx512.mask.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_mask",
+    "llvm.x86.avx512.mask.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_mask",
+    "llvm.x86.avx512.mask.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_mask",
+    "llvm.x86.avx512.mask.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_mask",
+    "llvm.x86.avx512.mask.fixupimm.sd" => "__builtin_ia32_fixupimmsd_mask",
+    "llvm.x86.avx512.mask.fixupimm.ss" => "__builtin_ia32_fixupimmss_mask",
+    "llvm.x86.avx512.mask.fpclass.pd.128" => "__builtin_ia32_fpclasspd128_mask",
+    "llvm.x86.avx512.mask.fpclass.pd.256" => "__builtin_ia32_fpclasspd256_mask",
+    "llvm.x86.avx512.mask.fpclass.pd.512" => "__builtin_ia32_fpclasspd512_mask",
+    "llvm.x86.avx512.mask.fpclass.ps.128" => "__builtin_ia32_fpclassps128_mask",
+    "llvm.x86.avx512.mask.fpclass.ps.256" => "__builtin_ia32_fpclassps256_mask",
+    "llvm.x86.avx512.mask.fpclass.ps.512" => "__builtin_ia32_fpclassps512_mask",
+    "llvm.x86.avx512.mask.fpclass.sd" => "__builtin_ia32_fpclasssd_mask",
+    "llvm.x86.avx512.mask.fpclass.ss" => "__builtin_ia32_fpclassss_mask",
+    "llvm.x86.avx512.mask.getexp.pd.128" => "__builtin_ia32_getexppd128_mask",
+    "llvm.x86.avx512.mask.getexp.pd.256" => "__builtin_ia32_getexppd256_mask",
+    "llvm.x86.avx512.mask.getexp.pd.512" => "__builtin_ia32_getexppd512_mask",
+    "llvm.x86.avx512.mask.getexp.ps.128" => "__builtin_ia32_getexpps128_mask",
+    "llvm.x86.avx512.mask.getexp.ps.256" => "__builtin_ia32_getexpps256_mask",
+    "llvm.x86.avx512.mask.getexp.ps.512" => "__builtin_ia32_getexpps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask",
+    "llvm.x86.avx512.mask.getmant.pd.128" => "__builtin_ia32_getmantpd128_mask",
+    "llvm.x86.avx512.mask.getmant.pd.256" => "__builtin_ia32_getmantpd256_mask",
+    "llvm.x86.avx512.mask.getmant.pd.512" => "__builtin_ia32_getmantpd512_mask",
+    "llvm.x86.avx512.mask.getmant.ps.128" => "__builtin_ia32_getmantps128_mask",
+    "llvm.x86.avx512.mask.getmant.ps.256" => "__builtin_ia32_getmantps256_mask",
+    "llvm.x86.avx512.mask.getmant.ps.512" => "__builtin_ia32_getmantps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask",
+    "llvm.x86.avx512.mask.insertf32x4.256" => "__builtin_ia32_insertf32x4_256_mask",
+    "llvm.x86.avx512.mask.insertf32x4.512" => "__builtin_ia32_insertf32x4_mask",
+    "llvm.x86.avx512.mask.insertf32x8.512" => "__builtin_ia32_insertf32x8_mask",
+    "llvm.x86.avx512.mask.insertf64x2.256" => "__builtin_ia32_insertf64x2_256_mask",
+    "llvm.x86.avx512.mask.insertf64x2.512" => "__builtin_ia32_insertf64x2_512_mask",
+    "llvm.x86.avx512.mask.insertf64x4.512" => "__builtin_ia32_insertf64x4_mask",
+    "llvm.x86.avx512.mask.inserti32x4.256" => "__builtin_ia32_inserti32x4_256_mask",
+    "llvm.x86.avx512.mask.inserti32x4.512" => "__builtin_ia32_inserti32x4_mask",
+    "llvm.x86.avx512.mask.inserti32x8.512" => "__builtin_ia32_inserti32x8_mask",
+    "llvm.x86.avx512.mask.inserti64x2.256" => "__builtin_ia32_inserti64x2_256_mask",
+    "llvm.x86.avx512.mask.inserti64x2.512" => "__builtin_ia32_inserti64x2_512_mask",
+    "llvm.x86.avx512.mask.inserti64x4.512" => "__builtin_ia32_inserti64x4_mask",
+    "llvm.x86.avx512.mask.loadu.d.512" => "__builtin_ia32_loaddqusi512_mask",
+    "llvm.x86.avx512.mask.loadu.pd.512" => "__builtin_ia32_loadupd512_mask",
+    "llvm.x86.avx512.mask.loadu.ps.512" => "__builtin_ia32_loadups512_mask",
+    "llvm.x86.avx512.mask.loadu.q.512" => "__builtin_ia32_loaddqudi512_mask",
+    "llvm.x86.avx512.mask.lzcnt.d.512" => "__builtin_ia32_vplzcntd_512_mask",
+    "llvm.x86.avx512.mask.lzcnt.q.512" => "__builtin_ia32_vplzcntq_512_mask",
+    "llvm.x86.avx512.mask.max.pd.128" => "__builtin_ia32_maxpd_mask",
+    "llvm.x86.avx512.mask.max.pd.256" => "__builtin_ia32_maxpd256_mask",
+    "llvm.x86.avx512.mask.max.pd.512" => "__builtin_ia32_maxpd512_mask",
+    "llvm.x86.avx512.mask.max.ps.128" => "__builtin_ia32_maxps_mask",
+    "llvm.x86.avx512.mask.max.ps.256" => "__builtin_ia32_maxps256_mask",
+    "llvm.x86.avx512.mask.max.ps.512" => "__builtin_ia32_maxps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask",
+    "llvm.x86.avx512.mask.min.pd.128" => "__builtin_ia32_minpd_mask",
+    "llvm.x86.avx512.mask.min.pd.256" => "__builtin_ia32_minpd256_mask",
+    "llvm.x86.avx512.mask.min.pd.512" => "__builtin_ia32_minpd512_mask",
+    "llvm.x86.avx512.mask.min.ps.128" => "__builtin_ia32_minps_mask",
+    "llvm.x86.avx512.mask.min.ps.256" => "__builtin_ia32_minps256_mask",
+    "llvm.x86.avx512.mask.min.ps.512" => "__builtin_ia32_minps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask",
+    "llvm.x86.avx512.mask.move.sd" => "__builtin_ia32_movsd_mask",
+    "llvm.x86.avx512.mask.move.ss" => "__builtin_ia32_movss_mask",
+    "llvm.x86.avx512.mask.mul.pd.128" => "__builtin_ia32_mulpd_mask",
+    "llvm.x86.avx512.mask.mul.pd.256" => "__builtin_ia32_mulpd256_mask",
+    "llvm.x86.avx512.mask.mul.pd.512" => "__builtin_ia32_mulpd512_mask",
+    "llvm.x86.avx512.mask.mul.ps.128" => "__builtin_ia32_mulps_mask",
+    "llvm.x86.avx512.mask.mul.ps.256" => "__builtin_ia32_mulps256_mask",
+    "llvm.x86.avx512.mask.mul.ps.512" => "__builtin_ia32_mulps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask",
+    "llvm.x86.avx512.mask.or.pd.128" => "__builtin_ia32_orpd128_mask",
+    "llvm.x86.avx512.mask.or.pd.256" => "__builtin_ia32_orpd256_mask",
+    "llvm.x86.avx512.mask.or.pd.512" => "__builtin_ia32_orpd512_mask",
+    "llvm.x86.avx512.mask.or.ps.128" => "__builtin_ia32_orps128_mask",
+    "llvm.x86.avx512.mask.or.ps.256" => "__builtin_ia32_orps256_mask",
+    "llvm.x86.avx512.mask.or.ps.512" => "__builtin_ia32_orps512_mask",
+    "llvm.x86.avx512.mask.pabs.b.128" => "__builtin_ia32_pabsb128_mask",
+    "llvm.x86.avx512.mask.pabs.b.256" => "__builtin_ia32_pabsb256_mask",
+    "llvm.x86.avx512.mask.pabs.b.512" => "__builtin_ia32_pabsb512_mask",
+    "llvm.x86.avx512.mask.pabs.d.128" => "__builtin_ia32_pabsd128_mask",
+    "llvm.x86.avx512.mask.pabs.d.256" => "__builtin_ia32_pabsd256_mask",
+    "llvm.x86.avx512.mask.pabs.d.512" => "__builtin_ia32_pabsd512_mask",
+    "llvm.x86.avx512.mask.pabs.q.128" => "__builtin_ia32_pabsq128_mask",
+    "llvm.x86.avx512.mask.pabs.q.256" => "__builtin_ia32_pabsq256_mask",
+    "llvm.x86.avx512.mask.pabs.q.512" => "__builtin_ia32_pabsq512_mask",
+    "llvm.x86.avx512.mask.pabs.w.128" => "__builtin_ia32_pabsw128_mask",
+    "llvm.x86.avx512.mask.pabs.w.256" => "__builtin_ia32_pabsw256_mask",
+    "llvm.x86.avx512.mask.pabs.w.512" => "__builtin_ia32_pabsw512_mask",
+    "llvm.x86.avx512.mask.packssdw.128" => "__builtin_ia32_packssdw128_mask",
+    "llvm.x86.avx512.mask.packssdw.256" => "__builtin_ia32_packssdw256_mask",
+    "llvm.x86.avx512.mask.packssdw.512" => "__builtin_ia32_packssdw512_mask",
+    "llvm.x86.avx512.mask.packsswb.128" => "__builtin_ia32_packsswb128_mask",
+    "llvm.x86.avx512.mask.packsswb.256" => "__builtin_ia32_packsswb256_mask",
+    "llvm.x86.avx512.mask.packsswb.512" => "__builtin_ia32_packsswb512_mask",
+    "llvm.x86.avx512.mask.packusdw.128" => "__builtin_ia32_packusdw128_mask",
+    "llvm.x86.avx512.mask.packusdw.256" => "__builtin_ia32_packusdw256_mask",
+    "llvm.x86.avx512.mask.packusdw.512" => "__builtin_ia32_packusdw512_mask",
+    "llvm.x86.avx512.mask.packuswb.128" => "__builtin_ia32_packuswb128_mask",
+    "llvm.x86.avx512.mask.packuswb.256" => "__builtin_ia32_packuswb256_mask",
+    "llvm.x86.avx512.mask.packuswb.512" => "__builtin_ia32_packuswb512_mask",
+    "llvm.x86.avx512.mask.padd.b.128" => "__builtin_ia32_paddb128_mask",
+    "llvm.x86.avx512.mask.padd.b.256" => "__builtin_ia32_paddb256_mask",
+    "llvm.x86.avx512.mask.padd.b.512" => "__builtin_ia32_paddb512_mask",
+    "llvm.x86.avx512.mask.padd.d.128" => "__builtin_ia32_paddd128_mask",
+    "llvm.x86.avx512.mask.padd.d.256" => "__builtin_ia32_paddd256_mask",
+    "llvm.x86.avx512.mask.padd.d.512" => "__builtin_ia32_paddd512_mask",
+    "llvm.x86.avx512.mask.padd.q.128" => "__builtin_ia32_paddq128_mask",
+    "llvm.x86.avx512.mask.padd.q.256" => "__builtin_ia32_paddq256_mask",
+    "llvm.x86.avx512.mask.padd.q.512" => "__builtin_ia32_paddq512_mask",
+    "llvm.x86.avx512.mask.padd.w.128" => "__builtin_ia32_paddw128_mask",
+    "llvm.x86.avx512.mask.padd.w.256" => "__builtin_ia32_paddw256_mask",
+    "llvm.x86.avx512.mask.padd.w.512" => "__builtin_ia32_paddw512_mask",
+    "llvm.x86.avx512.mask.padds.b.128" => "__builtin_ia32_paddsb128_mask",
+    "llvm.x86.avx512.mask.padds.b.256" => "__builtin_ia32_paddsb256_mask",
+    "llvm.x86.avx512.mask.padds.b.512" => "__builtin_ia32_paddsb512_mask",
+    "llvm.x86.avx512.mask.padds.w.128" => "__builtin_ia32_paddsw128_mask",
+    "llvm.x86.avx512.mask.padds.w.256" => "__builtin_ia32_paddsw256_mask",
+    "llvm.x86.avx512.mask.padds.w.512" => "__builtin_ia32_paddsw512_mask",
+    "llvm.x86.avx512.mask.paddus.b.128" => "__builtin_ia32_paddusb128_mask",
+    "llvm.x86.avx512.mask.paddus.b.256" => "__builtin_ia32_paddusb256_mask",
+    "llvm.x86.avx512.mask.paddus.b.512" => "__builtin_ia32_paddusb512_mask",
+    "llvm.x86.avx512.mask.paddus.w.128" => "__builtin_ia32_paddusw128_mask",
+    "llvm.x86.avx512.mask.paddus.w.256" => "__builtin_ia32_paddusw256_mask",
+    "llvm.x86.avx512.mask.paddus.w.512" => "__builtin_ia32_paddusw512_mask",
+    "llvm.x86.avx512.mask.pand.d.512" => "__builtin_ia32_pandd512_mask",
+    "llvm.x86.avx512.mask.pand.q.512" => "__builtin_ia32_pandq512_mask",
+    "llvm.x86.avx512.mask.pavg.b.128" => "__builtin_ia32_pavgb128_mask",
+    "llvm.x86.avx512.mask.pavg.b.256" => "__builtin_ia32_pavgb256_mask",
+    "llvm.x86.avx512.mask.pavg.b.512" => "__builtin_ia32_pavgb512_mask",
+    "llvm.x86.avx512.mask.pavg.w.128" => "__builtin_ia32_pavgw128_mask",
+    "llvm.x86.avx512.mask.pavg.w.256" => "__builtin_ia32_pavgw256_mask",
+    "llvm.x86.avx512.mask.pavg.w.512" => "__builtin_ia32_pavgw512_mask",
+    "llvm.x86.avx512.mask.pbroadcast.b.gpr.128" => "__builtin_ia32_pbroadcastb128_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.b.gpr.256" => "__builtin_ia32_pbroadcastb256_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.b.gpr.512" => "__builtin_ia32_pbroadcastb512_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.d.gpr.128" => "__builtin_ia32_pbroadcastd128_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.d.gpr.256" => "__builtin_ia32_pbroadcastd256_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.d.gpr.512" => "__builtin_ia32_pbroadcastd512_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.q.gpr.128" => "__builtin_ia32_pbroadcastq128_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.q.gpr.256" => "__builtin_ia32_pbroadcastq256_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.q.gpr.512" => "__builtin_ia32_pbroadcastq512_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.q.mem.512" => "__builtin_ia32_pbroadcastq512_mem_mask",
+    "llvm.x86.avx512.mask.pbroadcast.w.gpr.128" => "__builtin_ia32_pbroadcastw128_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.w.gpr.256" => "__builtin_ia32_pbroadcastw256_gpr_mask",
+    "llvm.x86.avx512.mask.pbroadcast.w.gpr.512" => "__builtin_ia32_pbroadcastw512_gpr_mask",
+    "llvm.x86.avx512.mask.pcmpeq.b.128" => "__builtin_ia32_pcmpeqb128_mask",
+    "llvm.x86.avx512.mask.pcmpeq.b.256" => "__builtin_ia32_pcmpeqb256_mask",
+    "llvm.x86.avx512.mask.pcmpeq.b.512" => "__builtin_ia32_pcmpeqb512_mask",
+    "llvm.x86.avx512.mask.pcmpeq.d.128" => "__builtin_ia32_pcmpeqd128_mask",
+    "llvm.x86.avx512.mask.pcmpeq.d.256" => "__builtin_ia32_pcmpeqd256_mask",
+    "llvm.x86.avx512.mask.pcmpeq.d.512" => "__builtin_ia32_pcmpeqd512_mask",
+    "llvm.x86.avx512.mask.pcmpeq.q.128" => "__builtin_ia32_pcmpeqq128_mask",
+    "llvm.x86.avx512.mask.pcmpeq.q.256" => "__builtin_ia32_pcmpeqq256_mask",
+    "llvm.x86.avx512.mask.pcmpeq.q.512" => "__builtin_ia32_pcmpeqq512_mask",
+    "llvm.x86.avx512.mask.pcmpeq.w.128" => "__builtin_ia32_pcmpeqw128_mask",
+    "llvm.x86.avx512.mask.pcmpeq.w.256" => "__builtin_ia32_pcmpeqw256_mask",
+    "llvm.x86.avx512.mask.pcmpeq.w.512" => "__builtin_ia32_pcmpeqw512_mask",
+    "llvm.x86.avx512.mask.pcmpgt.b.128" => "__builtin_ia32_pcmpgtb128_mask",
+    "llvm.x86.avx512.mask.pcmpgt.b.256" => "__builtin_ia32_pcmpgtb256_mask",
+    "llvm.x86.avx512.mask.pcmpgt.b.512" => "__builtin_ia32_pcmpgtb512_mask",
+    "llvm.x86.avx512.mask.pcmpgt.d.128" => "__builtin_ia32_pcmpgtd128_mask",
+    "llvm.x86.avx512.mask.pcmpgt.d.256" => "__builtin_ia32_pcmpgtd256_mask",
+    "llvm.x86.avx512.mask.pcmpgt.d.512" => "__builtin_ia32_pcmpgtd512_mask",
+    "llvm.x86.avx512.mask.pcmpgt.q.128" => "__builtin_ia32_pcmpgtq128_mask",
+    "llvm.x86.avx512.mask.pcmpgt.q.256" => "__builtin_ia32_pcmpgtq256_mask",
+    "llvm.x86.avx512.mask.pcmpgt.q.512" => "__builtin_ia32_pcmpgtq512_mask",
+    "llvm.x86.avx512.mask.pcmpgt.w.128" => "__builtin_ia32_pcmpgtw128_mask",
+    "llvm.x86.avx512.mask.pcmpgt.w.256" => "__builtin_ia32_pcmpgtw256_mask",
+    "llvm.x86.avx512.mask.pcmpgt.w.512" => "__builtin_ia32_pcmpgtw512_mask",
+    "llvm.x86.avx512.mask.permvar.df.256" => "__builtin_ia32_permvardf256_mask",
+    "llvm.x86.avx512.mask.permvar.df.512" => "__builtin_ia32_permvardf512_mask",
+    "llvm.x86.avx512.mask.permvar.di.256" => "__builtin_ia32_permvardi256_mask",
+    "llvm.x86.avx512.mask.permvar.di.512" => "__builtin_ia32_permvardi512_mask",
+    "llvm.x86.avx512.mask.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask",
+    "llvm.x86.avx512.mask.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask",
+    "llvm.x86.avx512.mask.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask",
+    "llvm.x86.avx512.mask.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask",
+    "llvm.x86.avx512.mask.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask",
+    "llvm.x86.avx512.mask.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask",
+    "llvm.x86.avx512.mask.permvar.sf.256" => "__builtin_ia32_permvarsf256_mask",
+    "llvm.x86.avx512.mask.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask",
+    "llvm.x86.avx512.mask.permvar.si.256" => "__builtin_ia32_permvarsi256_mask",
+    "llvm.x86.avx512.mask.permvar.si.512" => "__builtin_ia32_permvarsi512_mask",
+    "llvm.x86.avx512.mask.pmaddubs.w.128" => "__builtin_ia32_pmaddubsw128_mask",
+    "llvm.x86.avx512.mask.pmaddubs.w.256" => "__builtin_ia32_pmaddubsw256_mask",
+    "llvm.x86.avx512.mask.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask",
+    "llvm.x86.avx512.mask.pmaddw.d.128" => "__builtin_ia32_pmaddwd128_mask",
+    "llvm.x86.avx512.mask.pmaddw.d.256" => "__builtin_ia32_pmaddwd256_mask",
+    "llvm.x86.avx512.mask.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask",
+    "llvm.x86.avx512.mask.pmaxs.b.128" => "__builtin_ia32_pmaxsb128_mask",
+    "llvm.x86.avx512.mask.pmaxs.b.256" => "__builtin_ia32_pmaxsb256_mask",
+    "llvm.x86.avx512.mask.pmaxs.b.512" => "__builtin_ia32_pmaxsb512_mask",
+    "llvm.x86.avx512.mask.pmaxs.d.128" => "__builtin_ia32_pmaxsd128_mask",
+    "llvm.x86.avx512.mask.pmaxs.d.256" => "__builtin_ia32_pmaxsd256_mask",
+    "llvm.x86.avx512.mask.pmaxs.d.512" => "__builtin_ia32_pmaxsd512_mask",
+    "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask",
+    "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask",
+    "llvm.x86.avx512.mask.pmaxs.q.512" => "__builtin_ia32_pmaxsq512_mask",
+    "llvm.x86.avx512.mask.pmaxs.w.128" => "__builtin_ia32_pmaxsw128_mask",
+    "llvm.x86.avx512.mask.pmaxs.w.256" => "__builtin_ia32_pmaxsw256_mask",
+    "llvm.x86.avx512.mask.pmaxs.w.512" => "__builtin_ia32_pmaxsw512_mask",
+    "llvm.x86.avx512.mask.pmaxu.b.128" => "__builtin_ia32_pmaxub128_mask",
+    "llvm.x86.avx512.mask.pmaxu.b.256" => "__builtin_ia32_pmaxub256_mask",
+    "llvm.x86.avx512.mask.pmaxu.b.512" => "__builtin_ia32_pmaxub512_mask",
+    "llvm.x86.avx512.mask.pmaxu.d.128" => "__builtin_ia32_pmaxud128_mask",
+    "llvm.x86.avx512.mask.pmaxu.d.256" => "__builtin_ia32_pmaxud256_mask",
+    "llvm.x86.avx512.mask.pmaxu.d.512" => "__builtin_ia32_pmaxud512_mask",
+    "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask",
+    "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask",
+    "llvm.x86.avx512.mask.pmaxu.q.512" => "__builtin_ia32_pmaxuq512_mask",
+    "llvm.x86.avx512.mask.pmaxu.w.128" => "__builtin_ia32_pmaxuw128_mask",
+    "llvm.x86.avx512.mask.pmaxu.w.256" => "__builtin_ia32_pmaxuw256_mask",
+    "llvm.x86.avx512.mask.pmaxu.w.512" => "__builtin_ia32_pmaxuw512_mask",
+    "llvm.x86.avx512.mask.pmins.b.128" => "__builtin_ia32_pminsb128_mask",
+    "llvm.x86.avx512.mask.pmins.b.256" => "__builtin_ia32_pminsb256_mask",
+    "llvm.x86.avx512.mask.pmins.b.512" => "__builtin_ia32_pminsb512_mask",
+    "llvm.x86.avx512.mask.pmins.d.128" => "__builtin_ia32_pminsd128_mask",
+    "llvm.x86.avx512.mask.pmins.d.256" => "__builtin_ia32_pminsd256_mask",
+    "llvm.x86.avx512.mask.pmins.d.512" => "__builtin_ia32_pminsd512_mask",
+    "llvm.x86.avx512.mask.pmins.q.128" => "__builtin_ia32_pminsq128_mask",
+    "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask",
+    "llvm.x86.avx512.mask.pmins.q.512" => "__builtin_ia32_pminsq512_mask",
+    "llvm.x86.avx512.mask.pmins.w.128" => "__builtin_ia32_pminsw128_mask",
+    "llvm.x86.avx512.mask.pmins.w.256" => "__builtin_ia32_pminsw256_mask",
+    "llvm.x86.avx512.mask.pmins.w.512" => "__builtin_ia32_pminsw512_mask",
+    "llvm.x86.avx512.mask.pminu.b.128" => "__builtin_ia32_pminub128_mask",
+    "llvm.x86.avx512.mask.pminu.b.256" => "__builtin_ia32_pminub256_mask",
+    "llvm.x86.avx512.mask.pminu.b.512" => "__builtin_ia32_pminub512_mask",
+    "llvm.x86.avx512.mask.pminu.d.128" => "__builtin_ia32_pminud128_mask",
+    "llvm.x86.avx512.mask.pminu.d.256" => "__builtin_ia32_pminud256_mask",
+    "llvm.x86.avx512.mask.pminu.d.512" => "__builtin_ia32_pminud512_mask",
+    "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask",
+    "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask",
+    "llvm.x86.avx512.mask.pminu.q.512" => "__builtin_ia32_pminuq512_mask",
+    "llvm.x86.avx512.mask.pminu.w.128" => "__builtin_ia32_pminuw128_mask",
+    "llvm.x86.avx512.mask.pminu.w.256" => "__builtin_ia32_pminuw256_mask",
+    "llvm.x86.avx512.mask.pminu.w.512" => "__builtin_ia32_pminuw512_mask",
+    "llvm.x86.avx512.mask.pmov.db.128" => "__builtin_ia32_pmovdb128_mask",
+    "llvm.x86.avx512.mask.pmov.db.256" => "__builtin_ia32_pmovdb256_mask",
+    "llvm.x86.avx512.mask.pmov.db.512" => "__builtin_ia32_pmovdb512_mask",
+    "llvm.x86.avx512.mask.pmov.db.mem.128" => "__builtin_ia32_pmovdb128mem_mask",
+    "llvm.x86.avx512.mask.pmov.db.mem.256" => "__builtin_ia32_pmovdb256mem_mask",
+    "llvm.x86.avx512.mask.pmov.db.mem.512" => "__builtin_ia32_pmovdb512mem_mask",
+    "llvm.x86.avx512.mask.pmov.dw.128" => "__builtin_ia32_pmovdw128_mask",
+    "llvm.x86.avx512.mask.pmov.dw.256" => "__builtin_ia32_pmovdw256_mask",
+    "llvm.x86.avx512.mask.pmov.dw.512" => "__builtin_ia32_pmovdw512_mask",
+    "llvm.x86.avx512.mask.pmov.dw.mem.128" => "__builtin_ia32_pmovdw128mem_mask",
+    "llvm.x86.avx512.mask.pmov.dw.mem.256" => "__builtin_ia32_pmovdw256mem_mask",
+    "llvm.x86.avx512.mask.pmov.dw.mem.512" => "__builtin_ia32_pmovdw512mem_mask",
+    "llvm.x86.avx512.mask.pmov.qb.128" => "__builtin_ia32_pmovqb128_mask",
+    "llvm.x86.avx512.mask.pmov.qb.256" => "__builtin_ia32_pmovqb256_mask",
+    "llvm.x86.avx512.mask.pmov.qb.512" => "__builtin_ia32_pmovqb512_mask",
+    "llvm.x86.avx512.mask.pmov.qb.mem.128" => "__builtin_ia32_pmovqb128mem_mask",
+    "llvm.x86.avx512.mask.pmov.qb.mem.256" => "__builtin_ia32_pmovqb256mem_mask",
+    "llvm.x86.avx512.mask.pmov.qb.mem.512" => "__builtin_ia32_pmovqb512mem_mask",
+    "llvm.x86.avx512.mask.pmov.qd.128" => "__builtin_ia32_pmovqd128_mask",
+    "llvm.x86.avx512.mask.pmov.qd.256" => "__builtin_ia32_pmovqd256_mask",
+    "llvm.x86.avx512.mask.pmov.qd.512" => "__builtin_ia32_pmovqd512_mask",
+    "llvm.x86.avx512.mask.pmov.qd.mem.128" => "__builtin_ia32_pmovqd128mem_mask",
+    "llvm.x86.avx512.mask.pmov.qd.mem.256" => "__builtin_ia32_pmovqd256mem_mask",
+    "llvm.x86.avx512.mask.pmov.qd.mem.512" => "__builtin_ia32_pmovqd512mem_mask",
+    "llvm.x86.avx512.mask.pmov.qw.128" => "__builtin_ia32_pmovqw128_mask",
+    "llvm.x86.avx512.mask.pmov.qw.256" => "__builtin_ia32_pmovqw256_mask",
+    "llvm.x86.avx512.mask.pmov.qw.512" => "__builtin_ia32_pmovqw512_mask",
+    "llvm.x86.avx512.mask.pmov.qw.mem.128" => "__builtin_ia32_pmovqw128mem_mask",
+    "llvm.x86.avx512.mask.pmov.qw.mem.256" => "__builtin_ia32_pmovqw256mem_mask",
+    "llvm.x86.avx512.mask.pmov.qw.mem.512" => "__builtin_ia32_pmovqw512mem_mask",
+    "llvm.x86.avx512.mask.pmov.wb.128" => "__builtin_ia32_pmovwb128_mask",
+    "llvm.x86.avx512.mask.pmov.wb.256" => "__builtin_ia32_pmovwb256_mask",
+    "llvm.x86.avx512.mask.pmov.wb.512" => "__builtin_ia32_pmovwb512_mask",
+    "llvm.x86.avx512.mask.pmov.wb.mem.128" => "__builtin_ia32_pmovwb128mem_mask",
+    "llvm.x86.avx512.mask.pmov.wb.mem.256" => "__builtin_ia32_pmovwb256mem_mask",
+    "llvm.x86.avx512.mask.pmov.wb.mem.512" => "__builtin_ia32_pmovwb512mem_mask",
+    "llvm.x86.avx512.mask.pmovs.db.128" => "__builtin_ia32_pmovsdb128_mask",
+    "llvm.x86.avx512.mask.pmovs.db.256" => "__builtin_ia32_pmovsdb256_mask",
+    "llvm.x86.avx512.mask.pmovs.db.512" => "__builtin_ia32_pmovsdb512_mask",
+    "llvm.x86.avx512.mask.pmovs.db.mem.128" => "__builtin_ia32_pmovsdb128mem_mask",
+    "llvm.x86.avx512.mask.pmovs.db.mem.256" => "__builtin_ia32_pmovsdb256mem_mask",
+    "llvm.x86.avx512.mask.pmovs.db.mem.512" => "__builtin_ia32_pmovsdb512mem_mask",
+    "llvm.x86.avx512.mask.pmovs.dw.128" => "__builtin_ia32_pmovsdw128_mask",
+    "llvm.x86.avx512.mask.pmovs.dw.256" => "__builtin_ia32_pmovsdw256_mask",
+    "llvm.x86.avx512.mask.pmovs.dw.512" => "__builtin_ia32_pmovsdw512_mask",
+    "llvm.x86.avx512.mask.pmovs.dw.mem.128" => "__builtin_ia32_pmovsdw128mem_mask",
+    "llvm.x86.avx512.mask.pmovs.dw.mem.256" => "__builtin_ia32_pmovsdw256mem_mask",
+    "llvm.x86.avx512.mask.pmovs.dw.mem.512" => "__builtin_ia32_pmovsdw512mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qb.128" => "__builtin_ia32_pmovsqb128_mask",
+    "llvm.x86.avx512.mask.pmovs.qb.256" => "__builtin_ia32_pmovsqb256_mask",
+    "llvm.x86.avx512.mask.pmovs.qb.512" => "__builtin_ia32_pmovsqb512_mask",
+    "llvm.x86.avx512.mask.pmovs.qb.mem.128" => "__builtin_ia32_pmovsqb128mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qb.mem.256" => "__builtin_ia32_pmovsqb256mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qb.mem.512" => "__builtin_ia32_pmovsqb512mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qd.128" => "__builtin_ia32_pmovsqd128_mask",
+    "llvm.x86.avx512.mask.pmovs.qd.256" => "__builtin_ia32_pmovsqd256_mask",
+    "llvm.x86.avx512.mask.pmovs.qd.512" => "__builtin_ia32_pmovsqd512_mask",
+    "llvm.x86.avx512.mask.pmovs.qd.mem.128" => "__builtin_ia32_pmovsqd128mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qd.mem.256" => "__builtin_ia32_pmovsqd256mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qd.mem.512" => "__builtin_ia32_pmovsqd512mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qw.128" => "__builtin_ia32_pmovsqw128_mask",
+    "llvm.x86.avx512.mask.pmovs.qw.256" => "__builtin_ia32_pmovsqw256_mask",
+    "llvm.x86.avx512.mask.pmovs.qw.512" => "__builtin_ia32_pmovsqw512_mask",
+    "llvm.x86.avx512.mask.pmovs.qw.mem.128" => "__builtin_ia32_pmovsqw128mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qw.mem.256" => "__builtin_ia32_pmovsqw256mem_mask",
+    "llvm.x86.avx512.mask.pmovs.qw.mem.512" => "__builtin_ia32_pmovsqw512mem_mask",
+    "llvm.x86.avx512.mask.pmovs.wb.128" => "__builtin_ia32_pmovswb128_mask",
+    "llvm.x86.avx512.mask.pmovs.wb.256" => "__builtin_ia32_pmovswb256_mask",
+    "llvm.x86.avx512.mask.pmovs.wb.512" => "__builtin_ia32_pmovswb512_mask",
+    "llvm.x86.avx512.mask.pmovs.wb.mem.128" => "__builtin_ia32_pmovswb128mem_mask",
+    "llvm.x86.avx512.mask.pmovs.wb.mem.256" => "__builtin_ia32_pmovswb256mem_mask",
+    "llvm.x86.avx512.mask.pmovs.wb.mem.512" => "__builtin_ia32_pmovswb512mem_mask",
+    "llvm.x86.avx512.mask.pmovsxb.d.128" => "__builtin_ia32_pmovsxbd128_mask",
+    "llvm.x86.avx512.mask.pmovsxb.d.256" => "__builtin_ia32_pmovsxbd256_mask",
+    "llvm.x86.avx512.mask.pmovsxb.d.512" => "__builtin_ia32_pmovsxbd512_mask",
+    "llvm.x86.avx512.mask.pmovsxb.q.128" => "__builtin_ia32_pmovsxbq128_mask",
+    "llvm.x86.avx512.mask.pmovsxb.q.256" => "__builtin_ia32_pmovsxbq256_mask",
+    "llvm.x86.avx512.mask.pmovsxb.q.512" => "__builtin_ia32_pmovsxbq512_mask",
+    "llvm.x86.avx512.mask.pmovsxb.w.128" => "__builtin_ia32_pmovsxbw128_mask",
+    "llvm.x86.avx512.mask.pmovsxb.w.256" => "__builtin_ia32_pmovsxbw256_mask",
+    "llvm.x86.avx512.mask.pmovsxb.w.512" => "__builtin_ia32_pmovsxbw512_mask",
+    "llvm.x86.avx512.mask.pmovsxd.q.128" => "__builtin_ia32_pmovsxdq128_mask",
+    "llvm.x86.avx512.mask.pmovsxd.q.256" => "__builtin_ia32_pmovsxdq256_mask",
+    "llvm.x86.avx512.mask.pmovsxd.q.512" => "__builtin_ia32_pmovsxdq512_mask",
+    "llvm.x86.avx512.mask.pmovsxw.d.128" => "__builtin_ia32_pmovsxwd128_mask",
+    "llvm.x86.avx512.mask.pmovsxw.d.256" => "__builtin_ia32_pmovsxwd256_mask",
+    "llvm.x86.avx512.mask.pmovsxw.d.512" => "__builtin_ia32_pmovsxwd512_mask",
+    "llvm.x86.avx512.mask.pmovsxw.q.128" => "__builtin_ia32_pmovsxwq128_mask",
+    "llvm.x86.avx512.mask.pmovsxw.q.256" => "__builtin_ia32_pmovsxwq256_mask",
+    "llvm.x86.avx512.mask.pmovsxw.q.512" => "__builtin_ia32_pmovsxwq512_mask",
+    "llvm.x86.avx512.mask.pmovus.db.128" => "__builtin_ia32_pmovusdb128_mask",
+    "llvm.x86.avx512.mask.pmovus.db.256" => "__builtin_ia32_pmovusdb256_mask",
+    "llvm.x86.avx512.mask.pmovus.db.512" => "__builtin_ia32_pmovusdb512_mask",
+    "llvm.x86.avx512.mask.pmovus.db.mem.128" => "__builtin_ia32_pmovusdb128mem_mask",
+    "llvm.x86.avx512.mask.pmovus.db.mem.256" => "__builtin_ia32_pmovusdb256mem_mask",
+    "llvm.x86.avx512.mask.pmovus.db.mem.512" => "__builtin_ia32_pmovusdb512mem_mask",
+    "llvm.x86.avx512.mask.pmovus.dw.128" => "__builtin_ia32_pmovusdw128_mask",
+    "llvm.x86.avx512.mask.pmovus.dw.256" => "__builtin_ia32_pmovusdw256_mask",
+    "llvm.x86.avx512.mask.pmovus.dw.512" => "__builtin_ia32_pmovusdw512_mask",
+    "llvm.x86.avx512.mask.pmovus.dw.mem.128" => "__builtin_ia32_pmovusdw128mem_mask",
+    "llvm.x86.avx512.mask.pmovus.dw.mem.256" => "__builtin_ia32_pmovusdw256mem_mask",
+    "llvm.x86.avx512.mask.pmovus.dw.mem.512" => "__builtin_ia32_pmovusdw512mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qb.128" => "__builtin_ia32_pmovusqb128_mask",
+    "llvm.x86.avx512.mask.pmovus.qb.256" => "__builtin_ia32_pmovusqb256_mask",
+    "llvm.x86.avx512.mask.pmovus.qb.512" => "__builtin_ia32_pmovusqb512_mask",
+    "llvm.x86.avx512.mask.pmovus.qb.mem.128" => "__builtin_ia32_pmovusqb128mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qb.mem.256" => "__builtin_ia32_pmovusqb256mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qb.mem.512" => "__builtin_ia32_pmovusqb512mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qd.128" => "__builtin_ia32_pmovusqd128_mask",
+    "llvm.x86.avx512.mask.pmovus.qd.256" => "__builtin_ia32_pmovusqd256_mask",
+    "llvm.x86.avx512.mask.pmovus.qd.512" => "__builtin_ia32_pmovusqd512_mask",
+    "llvm.x86.avx512.mask.pmovus.qd.mem.128" => "__builtin_ia32_pmovusqd128mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qd.mem.256" => "__builtin_ia32_pmovusqd256mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qd.mem.512" => "__builtin_ia32_pmovusqd512mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qw.128" => "__builtin_ia32_pmovusqw128_mask",
+    "llvm.x86.avx512.mask.pmovus.qw.256" => "__builtin_ia32_pmovusqw256_mask",
+    "llvm.x86.avx512.mask.pmovus.qw.512" => "__builtin_ia32_pmovusqw512_mask",
+    "llvm.x86.avx512.mask.pmovus.qw.mem.128" => "__builtin_ia32_pmovusqw128mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qw.mem.256" => "__builtin_ia32_pmovusqw256mem_mask",
+    "llvm.x86.avx512.mask.pmovus.qw.mem.512" => "__builtin_ia32_pmovusqw512mem_mask",
+    "llvm.x86.avx512.mask.pmovus.wb.128" => "__builtin_ia32_pmovuswb128_mask",
+    "llvm.x86.avx512.mask.pmovus.wb.256" => "__builtin_ia32_pmovuswb256_mask",
+    "llvm.x86.avx512.mask.pmovus.wb.512" => "__builtin_ia32_pmovuswb512_mask",
+    "llvm.x86.avx512.mask.pmovus.wb.mem.128" => "__builtin_ia32_pmovuswb128mem_mask",
+    "llvm.x86.avx512.mask.pmovus.wb.mem.256" => "__builtin_ia32_pmovuswb256mem_mask",
+    "llvm.x86.avx512.mask.pmovus.wb.mem.512" => "__builtin_ia32_pmovuswb512mem_mask",
+    "llvm.x86.avx512.mask.pmovzxb.d.128" => "__builtin_ia32_pmovzxbd128_mask",
+    "llvm.x86.avx512.mask.pmovzxb.d.256" => "__builtin_ia32_pmovzxbd256_mask",
+    "llvm.x86.avx512.mask.pmovzxb.d.512" => "__builtin_ia32_pmovzxbd512_mask",
+    "llvm.x86.avx512.mask.pmovzxb.q.128" => "__builtin_ia32_pmovzxbq128_mask",
+    "llvm.x86.avx512.mask.pmovzxb.q.256" => "__builtin_ia32_pmovzxbq256_mask",
+    "llvm.x86.avx512.mask.pmovzxb.q.512" => "__builtin_ia32_pmovzxbq512_mask",
+    "llvm.x86.avx512.mask.pmovzxb.w.128" => "__builtin_ia32_pmovzxbw128_mask",
+    "llvm.x86.avx512.mask.pmovzxb.w.256" => "__builtin_ia32_pmovzxbw256_mask",
+    "llvm.x86.avx512.mask.pmovzxb.w.512" => "__builtin_ia32_pmovzxbw512_mask",
+    "llvm.x86.avx512.mask.pmovzxd.q.128" => "__builtin_ia32_pmovzxdq128_mask",
+    "llvm.x86.avx512.mask.pmovzxd.q.256" => "__builtin_ia32_pmovzxdq256_mask",
+    "llvm.x86.avx512.mask.pmovzxd.q.512" => "__builtin_ia32_pmovzxdq512_mask",
+    "llvm.x86.avx512.mask.pmovzxw.d.128" => "__builtin_ia32_pmovzxwd128_mask",
+    "llvm.x86.avx512.mask.pmovzxw.d.256" => "__builtin_ia32_pmovzxwd256_mask",
+    "llvm.x86.avx512.mask.pmovzxw.d.512" => "__builtin_ia32_pmovzxwd512_mask",
+    "llvm.x86.avx512.mask.pmovzxw.q.128" => "__builtin_ia32_pmovzxwq128_mask",
+    "llvm.x86.avx512.mask.pmovzxw.q.256" => "__builtin_ia32_pmovzxwq256_mask",
+    "llvm.x86.avx512.mask.pmovzxw.q.512" => "__builtin_ia32_pmovzxwq512_mask",
+    "llvm.x86.avx512.mask.pmul.dq.128" => "__builtin_ia32_pmuldq128_mask",
+    "llvm.x86.avx512.mask.pmul.dq.256" => "__builtin_ia32_pmuldq256_mask",
+    "llvm.x86.avx512.mask.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
+    "llvm.x86.avx512.mask.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128_mask",
+    "llvm.x86.avx512.mask.pmul.hr.sw.256" => "__builtin_ia32_pmulhrsw256_mask",
+    "llvm.x86.avx512.mask.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask",
+    "llvm.x86.avx512.mask.pmulh.w.128" => "__builtin_ia32_pmulhw128_mask",
+    "llvm.x86.avx512.mask.pmulh.w.256" => "__builtin_ia32_pmulhw256_mask",
+    "llvm.x86.avx512.mask.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask",
+    "llvm.x86.avx512.mask.pmulhu.w.128" => "__builtin_ia32_pmulhuw128_mask",
+    "llvm.x86.avx512.mask.pmulhu.w.256" => "__builtin_ia32_pmulhuw256_mask",
+    "llvm.x86.avx512.mask.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask",
+    "llvm.x86.avx512.mask.pmull.d.128" => "__builtin_ia32_pmulld128_mask",
+    "llvm.x86.avx512.mask.pmull.d.256" => "__builtin_ia32_pmulld256_mask",
+    "llvm.x86.avx512.mask.pmull.d.512" => "__builtin_ia32_pmulld512_mask",
+    "llvm.x86.avx512.mask.pmull.q.128" => "__builtin_ia32_pmullq128_mask",
+    "llvm.x86.avx512.mask.pmull.q.256" => "__builtin_ia32_pmullq256_mask",
+    "llvm.x86.avx512.mask.pmull.q.512" => "__builtin_ia32_pmullq512_mask",
+    "llvm.x86.avx512.mask.pmull.w.128" => "__builtin_ia32_pmullw128_mask",
+    "llvm.x86.avx512.mask.pmull.w.256" => "__builtin_ia32_pmullw256_mask",
+    "llvm.x86.avx512.mask.pmull.w.512" => "__builtin_ia32_pmullw512_mask",
+    "llvm.x86.avx512.mask.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask",
+    "llvm.x86.avx512.mask.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask",
+    "llvm.x86.avx512.mask.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask",
+    "llvm.x86.avx512.mask.pmulu.dq.128" => "__builtin_ia32_pmuludq128_mask",
+    "llvm.x86.avx512.mask.pmulu.dq.256" => "__builtin_ia32_pmuludq256_mask",
+    "llvm.x86.avx512.mask.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
+    "llvm.x86.avx512.mask.prol.d.128" => "__builtin_ia32_prold128_mask",
+    "llvm.x86.avx512.mask.prol.d.256" => "__builtin_ia32_prold256_mask",
+    "llvm.x86.avx512.mask.prol.d.512" => "__builtin_ia32_prold512_mask",
+    "llvm.x86.avx512.mask.prol.q.128" => "__builtin_ia32_prolq128_mask",
+    "llvm.x86.avx512.mask.prol.q.256" => "__builtin_ia32_prolq256_mask",
+    "llvm.x86.avx512.mask.prol.q.512" => "__builtin_ia32_prolq512_mask",
+    "llvm.x86.avx512.mask.prolv.d.128" => "__builtin_ia32_prolvd128_mask",
+    "llvm.x86.avx512.mask.prolv.d.256" => "__builtin_ia32_prolvd256_mask",
+    "llvm.x86.avx512.mask.prolv.d.512" => "__builtin_ia32_prolvd512_mask",
+    "llvm.x86.avx512.mask.prolv.q.128" => "__builtin_ia32_prolvq128_mask",
+    "llvm.x86.avx512.mask.prolv.q.256" => "__builtin_ia32_prolvq256_mask",
+    "llvm.x86.avx512.mask.prolv.q.512" => "__builtin_ia32_prolvq512_mask",
+    "llvm.x86.avx512.mask.pror.d.128" => "__builtin_ia32_prord128_mask",
+    "llvm.x86.avx512.mask.pror.d.256" => "__builtin_ia32_prord256_mask",
+    "llvm.x86.avx512.mask.pror.d.512" => "__builtin_ia32_prord512_mask",
+    "llvm.x86.avx512.mask.pror.q.128" => "__builtin_ia32_prorq128_mask",
+    "llvm.x86.avx512.mask.pror.q.256" => "__builtin_ia32_prorq256_mask",
+    "llvm.x86.avx512.mask.pror.q.512" => "__builtin_ia32_prorq512_mask",
+    "llvm.x86.avx512.mask.prorv.d.128" => "__builtin_ia32_prorvd128_mask",
+    "llvm.x86.avx512.mask.prorv.d.256" => "__builtin_ia32_prorvd256_mask",
+    "llvm.x86.avx512.mask.prorv.d.512" => "__builtin_ia32_prorvd512_mask",
+    "llvm.x86.avx512.mask.prorv.q.128" => "__builtin_ia32_prorvq128_mask",
+    "llvm.x86.avx512.mask.prorv.q.256" => "__builtin_ia32_prorvq256_mask",
+    "llvm.x86.avx512.mask.prorv.q.512" => "__builtin_ia32_prorvq512_mask",
+    "llvm.x86.avx512.mask.pshuf.b.128" => "__builtin_ia32_pshufb128_mask",
+    "llvm.x86.avx512.mask.pshuf.b.256" => "__builtin_ia32_pshufb256_mask",
+    "llvm.x86.avx512.mask.pshuf.b.512" => "__builtin_ia32_pshufb512_mask",
+    "llvm.x86.avx512.mask.psll.d" => "__builtin_ia32_pslld512_mask",
+    "llvm.x86.avx512.mask.psll.d.128" => "__builtin_ia32_pslld128_mask",
+    "llvm.x86.avx512.mask.psll.d.256" => "__builtin_ia32_pslld256_mask",
+    "llvm.x86.avx512.mask.psll.di.128" => "__builtin_ia32_pslldi128_mask",
+    "llvm.x86.avx512.mask.psll.di.256" => "__builtin_ia32_pslldi256_mask",
+    "llvm.x86.avx512.mask.psll.di.512" => "__builtin_ia32_pslldi512_mask",
+    "llvm.x86.avx512.mask.psll.q" => "__builtin_ia32_psllq512_mask",
+    "llvm.x86.avx512.mask.psll.q.128" => "__builtin_ia32_psllq128_mask",
+    "llvm.x86.avx512.mask.psll.q.256" => "__builtin_ia32_psllq256_mask",
+    "llvm.x86.avx512.mask.psll.qi.128" => "__builtin_ia32_psllqi128_mask",
+    "llvm.x86.avx512.mask.psll.qi.256" => "__builtin_ia32_psllqi256_mask",
+    "llvm.x86.avx512.mask.psll.qi.512" => "__builtin_ia32_psllqi512_mask",
+    "llvm.x86.avx512.mask.psll.w.128" => "__builtin_ia32_psllw128_mask",
+    "llvm.x86.avx512.mask.psll.w.256" => "__builtin_ia32_psllw256_mask",
+    "llvm.x86.avx512.mask.psll.w.512" => "__builtin_ia32_psllw512_mask",
+    "llvm.x86.avx512.mask.psll.wi.128" => "__builtin_ia32_psllwi128_mask",
+    "llvm.x86.avx512.mask.psll.wi.256" => "__builtin_ia32_psllwi256_mask",
+    "llvm.x86.avx512.mask.psll.wi.512" => "__builtin_ia32_psllwi512_mask",
+    "llvm.x86.avx512.mask.psllv.d" => "__builtin_ia32_psllv16si_mask",
+    "llvm.x86.avx512.mask.psllv.q" => "__builtin_ia32_psllv8di_mask",
+    "llvm.x86.avx512.mask.psllv16.hi" => "__builtin_ia32_psllv16hi_mask",
+    "llvm.x86.avx512.mask.psllv2.di" => "__builtin_ia32_psllv2di_mask",
+    "llvm.x86.avx512.mask.psllv32hi" => "__builtin_ia32_psllv32hi_mask",
+    "llvm.x86.avx512.mask.psllv4.di" => "__builtin_ia32_psllv4di_mask",
+    "llvm.x86.avx512.mask.psllv4.si" => "__builtin_ia32_psllv4si_mask",
+    "llvm.x86.avx512.mask.psllv8.hi" => "__builtin_ia32_psllv8hi_mask",
+    "llvm.x86.avx512.mask.psllv8.si" => "__builtin_ia32_psllv8si_mask",
+    "llvm.x86.avx512.mask.psra.d" => "__builtin_ia32_psrad512_mask",
+    "llvm.x86.avx512.mask.psra.d.128" => "__builtin_ia32_psrad128_mask",
+    "llvm.x86.avx512.mask.psra.d.256" => "__builtin_ia32_psrad256_mask",
+    "llvm.x86.avx512.mask.psra.di.128" => "__builtin_ia32_psradi128_mask",
+    "llvm.x86.avx512.mask.psra.di.256" => "__builtin_ia32_psradi256_mask",
+    "llvm.x86.avx512.mask.psra.di.512" => "__builtin_ia32_psradi512_mask",
+    "llvm.x86.avx512.mask.psra.q" => "__builtin_ia32_psraq512_mask",
+    "llvm.x86.avx512.mask.psra.q.128" => "__builtin_ia32_psraq128_mask",
+    "llvm.x86.avx512.mask.psra.q.256" => "__builtin_ia32_psraq256_mask",
+    "llvm.x86.avx512.mask.psra.qi.128" => "__builtin_ia32_psraqi128_mask",
+    "llvm.x86.avx512.mask.psra.qi.256" => "__builtin_ia32_psraqi256_mask",
+    "llvm.x86.avx512.mask.psra.qi.512" => "__builtin_ia32_psraqi512_mask",
+    "llvm.x86.avx512.mask.psra.w.128" => "__builtin_ia32_psraw128_mask",
+    "llvm.x86.avx512.mask.psra.w.256" => "__builtin_ia32_psraw256_mask",
+    "llvm.x86.avx512.mask.psra.w.512" => "__builtin_ia32_psraw512_mask",
+    "llvm.x86.avx512.mask.psra.wi.128" => "__builtin_ia32_psrawi128_mask",
+    "llvm.x86.avx512.mask.psra.wi.256" => "__builtin_ia32_psrawi256_mask",
+    "llvm.x86.avx512.mask.psra.wi.512" => "__builtin_ia32_psrawi512_mask",
+    "llvm.x86.avx512.mask.psrav.d" => "__builtin_ia32_psrav16si_mask",
+    "llvm.x86.avx512.mask.psrav.q" => "__builtin_ia32_psrav8di_mask",
+    "llvm.x86.avx512.mask.psrav.q.128" => "__builtin_ia32_psravq128_mask",
+    "llvm.x86.avx512.mask.psrav.q.256" => "__builtin_ia32_psravq256_mask",
+    "llvm.x86.avx512.mask.psrav16.hi" => "__builtin_ia32_psrav16hi_mask",
+    "llvm.x86.avx512.mask.psrav32.hi" => "__builtin_ia32_psrav32hi_mask",
+    "llvm.x86.avx512.mask.psrav4.si" => "__builtin_ia32_psrav4si_mask",
+    "llvm.x86.avx512.mask.psrav8.hi" => "__builtin_ia32_psrav8hi_mask",
+    "llvm.x86.avx512.mask.psrav8.si" => "__builtin_ia32_psrav8si_mask",
+    "llvm.x86.avx512.mask.psrl.d" => "__builtin_ia32_psrld512_mask",
+    "llvm.x86.avx512.mask.psrl.d.128" => "__builtin_ia32_psrld128_mask",
+    "llvm.x86.avx512.mask.psrl.d.256" => "__builtin_ia32_psrld256_mask",
+    "llvm.x86.avx512.mask.psrl.di.128" => "__builtin_ia32_psrldi128_mask",
+    "llvm.x86.avx512.mask.psrl.di.256" => "__builtin_ia32_psrldi256_mask",
+    "llvm.x86.avx512.mask.psrl.di.512" => "__builtin_ia32_psrldi512_mask",
+    "llvm.x86.avx512.mask.psrl.q" => "__builtin_ia32_psrlq512_mask",
+    "llvm.x86.avx512.mask.psrl.q.128" => "__builtin_ia32_psrlq128_mask",
+    "llvm.x86.avx512.mask.psrl.q.256" => "__builtin_ia32_psrlq256_mask",
+    "llvm.x86.avx512.mask.psrl.qi.128" => "__builtin_ia32_psrlqi128_mask",
+    "llvm.x86.avx512.mask.psrl.qi.256" => "__builtin_ia32_psrlqi256_mask",
+    "llvm.x86.avx512.mask.psrl.qi.512" => "__builtin_ia32_psrlqi512_mask",
+    "llvm.x86.avx512.mask.psrl.w.128" => "__builtin_ia32_psrlw128_mask",
+    "llvm.x86.avx512.mask.psrl.w.256" => "__builtin_ia32_psrlw256_mask",
+    "llvm.x86.avx512.mask.psrl.w.512" => "__builtin_ia32_psrlw512_mask",
+    "llvm.x86.avx512.mask.psrl.wi.128" => "__builtin_ia32_psrlwi128_mask",
+    "llvm.x86.avx512.mask.psrl.wi.256" => "__builtin_ia32_psrlwi256_mask",
+    "llvm.x86.avx512.mask.psrl.wi.512" => "__builtin_ia32_psrlwi512_mask",
+    "llvm.x86.avx512.mask.psrlv.d" => "__builtin_ia32_psrlv16si_mask",
+    "llvm.x86.avx512.mask.psrlv.q" => "__builtin_ia32_psrlv8di_mask",
+    "llvm.x86.avx512.mask.psrlv16.hi" => "__builtin_ia32_psrlv16hi_mask",
+    "llvm.x86.avx512.mask.psrlv2.di" => "__builtin_ia32_psrlv2di_mask",
+    "llvm.x86.avx512.mask.psrlv32hi" => "__builtin_ia32_psrlv32hi_mask",
+    "llvm.x86.avx512.mask.psrlv4.di" => "__builtin_ia32_psrlv4di_mask",
+    "llvm.x86.avx512.mask.psrlv4.si" => "__builtin_ia32_psrlv4si_mask",
+    "llvm.x86.avx512.mask.psrlv8.hi" => "__builtin_ia32_psrlv8hi_mask",
+    "llvm.x86.avx512.mask.psrlv8.si" => "__builtin_ia32_psrlv8si_mask",
+    "llvm.x86.avx512.mask.psub.b.128" => "__builtin_ia32_psubb128_mask",
+    "llvm.x86.avx512.mask.psub.b.256" => "__builtin_ia32_psubb256_mask",
+    "llvm.x86.avx512.mask.psub.b.512" => "__builtin_ia32_psubb512_mask",
+    "llvm.x86.avx512.mask.psub.d.128" => "__builtin_ia32_psubd128_mask",
+    "llvm.x86.avx512.mask.psub.d.256" => "__builtin_ia32_psubd256_mask",
+    "llvm.x86.avx512.mask.psub.d.512" => "__builtin_ia32_psubd512_mask",
+    "llvm.x86.avx512.mask.psub.q.128" => "__builtin_ia32_psubq128_mask",
+    "llvm.x86.avx512.mask.psub.q.256" => "__builtin_ia32_psubq256_mask",
+    "llvm.x86.avx512.mask.psub.q.512" => "__builtin_ia32_psubq512_mask",
+    "llvm.x86.avx512.mask.psub.w.128" => "__builtin_ia32_psubw128_mask",
+    "llvm.x86.avx512.mask.psub.w.256" => "__builtin_ia32_psubw256_mask",
+    "llvm.x86.avx512.mask.psub.w.512" => "__builtin_ia32_psubw512_mask",
+    "llvm.x86.avx512.mask.psubs.b.128" => "__builtin_ia32_psubsb128_mask",
+    "llvm.x86.avx512.mask.psubs.b.256" => "__builtin_ia32_psubsb256_mask",
+    "llvm.x86.avx512.mask.psubs.b.512" => "__builtin_ia32_psubsb512_mask",
+    "llvm.x86.avx512.mask.psubs.w.128" => "__builtin_ia32_psubsw128_mask",
+    "llvm.x86.avx512.mask.psubs.w.256" => "__builtin_ia32_psubsw256_mask",
+    "llvm.x86.avx512.mask.psubs.w.512" => "__builtin_ia32_psubsw512_mask",
+    "llvm.x86.avx512.mask.psubus.b.128" => "__builtin_ia32_psubusb128_mask",
+    "llvm.x86.avx512.mask.psubus.b.256" => "__builtin_ia32_psubusb256_mask",
+    "llvm.x86.avx512.mask.psubus.b.512" => "__builtin_ia32_psubusb512_mask",
+    "llvm.x86.avx512.mask.psubus.w.128" => "__builtin_ia32_psubusw128_mask",
+    "llvm.x86.avx512.mask.psubus.w.256" => "__builtin_ia32_psubusw256_mask",
+    "llvm.x86.avx512.mask.psubus.w.512" => "__builtin_ia32_psubusw512_mask",
+    "llvm.x86.avx512.mask.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask",
+    "llvm.x86.avx512.mask.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask",
+    "llvm.x86.avx512.mask.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask",
+    "llvm.x86.avx512.mask.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask",
+    "llvm.x86.avx512.mask.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask",
+    "llvm.x86.avx512.mask.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask",
+    "llvm.x86.avx512.mask.ptestm.d.512" => "__builtin_ia32_ptestmd512",
+    "llvm.x86.avx512.mask.ptestm.q.512" => "__builtin_ia32_ptestmq512",
+    "llvm.x86.avx512.mask.range.pd.128" => "__builtin_ia32_rangepd128_mask",
+    "llvm.x86.avx512.mask.range.pd.256" => "__builtin_ia32_rangepd256_mask",
+    "llvm.x86.avx512.mask.range.pd.512" => "__builtin_ia32_rangepd512_mask",
+    "llvm.x86.avx512.mask.range.ps.128" => "__builtin_ia32_rangeps128_mask",
+    "llvm.x86.avx512.mask.range.ps.256" => "__builtin_ia32_rangeps256_mask",
+    "llvm.x86.avx512.mask.range.ps.512" => "__builtin_ia32_rangeps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask",
+    "llvm.x86.avx512.mask.reduce.pd.128" => "__builtin_ia32_reducepd128_mask",
+    "llvm.x86.avx512.mask.reduce.pd.256" => "__builtin_ia32_reducepd256_mask",
+    "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask",
+    "llvm.x86.avx512.mask.reduce.ps.128" => "__builtin_ia32_reduceps128_mask",
+    "llvm.x86.avx512.mask.reduce.ps.256" => "__builtin_ia32_reduceps256_mask",
+    "llvm.x86.avx512.mask.reduce.ps.512" => "__builtin_ia32_reduceps512_mask",
+    "llvm.x86.avx512.mask.reduce.sd" => "__builtin_ia32_reducesd_mask",
+    "llvm.x86.avx512.mask.reduce.ss" => "__builtin_ia32_reducess_mask",
+    "llvm.x86.avx512.mask.rndscale.pd.128" => "__builtin_ia32_rndscalepd_128_mask",
+    "llvm.x86.avx512.mask.rndscale.pd.256" => "__builtin_ia32_rndscalepd_256_mask",
+    "llvm.x86.avx512.mask.rndscale.pd.512" => "__builtin_ia32_rndscalepd_mask",
+    "llvm.x86.avx512.mask.rndscale.ps.128" => "__builtin_ia32_rndscaleps_128_mask",
+    "llvm.x86.avx512.mask.rndscale.ps.256" => "__builtin_ia32_rndscaleps_256_mask",
+    "llvm.x86.avx512.mask.rndscale.ps.512" => "__builtin_ia32_rndscaleps_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask",
+    "llvm.x86.avx512.mask.scalef.pd.128" => "__builtin_ia32_scalefpd128_mask",
+    "llvm.x86.avx512.mask.scalef.pd.256" => "__builtin_ia32_scalefpd256_mask",
+    "llvm.x86.avx512.mask.scalef.pd.512" => "__builtin_ia32_scalefpd512_mask",
+    "llvm.x86.avx512.mask.scalef.ps.128" => "__builtin_ia32_scalefps128_mask",
+    "llvm.x86.avx512.mask.scalef.ps.256" => "__builtin_ia32_scalefps256_mask",
+    "llvm.x86.avx512.mask.scalef.ps.512" => "__builtin_ia32_scalefps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask",
+    "llvm.x86.avx512.mask.shuf.f32x4" => "__builtin_ia32_shuf_f32x4_mask",
+    "llvm.x86.avx512.mask.shuf.f32x4.256" => "__builtin_ia32_shuf_f32x4_256_mask",
+    "llvm.x86.avx512.mask.shuf.f64x2" => "__builtin_ia32_shuf_f64x2_mask",
+    "llvm.x86.avx512.mask.shuf.f64x2.256" => "__builtin_ia32_shuf_f64x2_256_mask",
+    "llvm.x86.avx512.mask.shuf.i32x4" => "__builtin_ia32_shuf_i32x4_mask",
+    "llvm.x86.avx512.mask.shuf.i32x4.256" => "__builtin_ia32_shuf_i32x4_256_mask",
+    "llvm.x86.avx512.mask.shuf.i64x2" => "__builtin_ia32_shuf_i64x2_mask",
+    "llvm.x86.avx512.mask.shuf.i64x2.256" => "__builtin_ia32_shuf_i64x2_256_mask",
+    "llvm.x86.avx512.mask.shuf.pd.128" => "__builtin_ia32_shufpd128_mask",
+    "llvm.x86.avx512.mask.shuf.pd.256" => "__builtin_ia32_shufpd256_mask",
+    "llvm.x86.avx512.mask.shuf.pd.512" => "__builtin_ia32_shufpd512_mask",
+    "llvm.x86.avx512.mask.shuf.ps.128" => "__builtin_ia32_shufps128_mask",
+    "llvm.x86.avx512.mask.shuf.ps.256" => "__builtin_ia32_shufps256_mask",
+    "llvm.x86.avx512.mask.shuf.ps.512" => "__builtin_ia32_shufps512_mask",
+    "llvm.x86.avx512.mask.sqrt.pd.128" => "__builtin_ia32_sqrtpd128_mask",
+    "llvm.x86.avx512.mask.sqrt.pd.256" => "__builtin_ia32_sqrtpd256_mask",
+    "llvm.x86.avx512.mask.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask",
+    "llvm.x86.avx512.mask.sqrt.ps.128" => "__builtin_ia32_sqrtps128_mask",
+    "llvm.x86.avx512.mask.sqrt.ps.256" => "__builtin_ia32_sqrtps256_mask",
+    "llvm.x86.avx512.mask.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask",
+    "llvm.x86.avx512.mask.store.ss" => "__builtin_ia32_storess_mask",
+    "llvm.x86.avx512.mask.storeu.d.512" => "__builtin_ia32_storedqusi512_mask",
+    "llvm.x86.avx512.mask.storeu.pd.512" => "__builtin_ia32_storeupd512_mask",
+    "llvm.x86.avx512.mask.storeu.ps.512" => "__builtin_ia32_storeups512_mask",
+    "llvm.x86.avx512.mask.storeu.q.512" => "__builtin_ia32_storedqudi512_mask",
+    "llvm.x86.avx512.mask.sub.pd.128" => "__builtin_ia32_subpd128_mask",
+    "llvm.x86.avx512.mask.sub.pd.256" => "__builtin_ia32_subpd256_mask",
+    "llvm.x86.avx512.mask.sub.pd.512" => "__builtin_ia32_subpd512_mask",
+    "llvm.x86.avx512.mask.sub.ps.128" => "__builtin_ia32_subps128_mask",
+    "llvm.x86.avx512.mask.sub.ps.256" => "__builtin_ia32_subps256_mask",
+    "llvm.x86.avx512.mask.sub.ps.512" => "__builtin_ia32_subps512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask",
+    "llvm.x86.avx512.mask.valign.d.128" => "__builtin_ia32_alignd128_mask",
+    "llvm.x86.avx512.mask.valign.d.256" => "__builtin_ia32_alignd256_mask",
+    "llvm.x86.avx512.mask.valign.d.512" => "__builtin_ia32_alignd512_mask",
+    "llvm.x86.avx512.mask.valign.q.128" => "__builtin_ia32_alignq128_mask",
+    "llvm.x86.avx512.mask.valign.q.256" => "__builtin_ia32_alignq256_mask",
+    "llvm.x86.avx512.mask.valign.q.512" => "__builtin_ia32_alignq512_mask",
+    "llvm.x86.avx512.mask.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps_mask",
+    "llvm.x86.avx512.mask.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256_mask",
+    "llvm.x86.avx512.mask.vcvtph2ps.512" => "__builtin_ia32_vcvtph2ps512_mask",
+    "llvm.x86.avx512.mask.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph_mask",
+    "llvm.x86.avx512.mask.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256_mask",
+    "llvm.x86.avx512.mask.vcvtps2ph.512" => "__builtin_ia32_vcvtps2ph512_mask",
+    "llvm.x86.avx512.mask.vextractf32x4.256" => "__builtin_ia32_extractf32x4_256_mask",
+    "llvm.x86.avx512.mask.vextractf32x4.512" => "__builtin_ia32_extractf32x4_mask",
+    "llvm.x86.avx512.mask.vextractf32x8.512" => "__builtin_ia32_extractf32x8_mask",
+    "llvm.x86.avx512.mask.vextractf64x2.256" => "__builtin_ia32_extractf64x2_256_mask",
+    "llvm.x86.avx512.mask.vextractf64x2.512" => "__builtin_ia32_extractf64x2_512_mask",
+    "llvm.x86.avx512.mask.vextractf64x4.512" => "__builtin_ia32_extractf64x4_mask",
+    "llvm.x86.avx512.mask.vextracti32x4.256" => "__builtin_ia32_extracti32x4_256_mask",
+    "llvm.x86.avx512.mask.vextracti32x4.512" => "__builtin_ia32_extracti32x4_mask",
+    "llvm.x86.avx512.mask.vextracti32x8.512" => "__builtin_ia32_extracti32x8_mask",
+    "llvm.x86.avx512.mask.vextracti64x2.256" => "__builtin_ia32_extracti64x2_256_mask",
+    "llvm.x86.avx512.mask.vextracti64x2.512" => "__builtin_ia32_extracti64x2_512_mask",
+    "llvm.x86.avx512.mask.vextracti64x4.512" => "__builtin_ia32_extracti64x4_mask",
+    "llvm.x86.avx512.mask.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask",
+    "llvm.x86.avx512.mask.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask",
+    "llvm.x86.avx512.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+    "llvm.x86.avx512.mask.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask",
+    "llvm.x86.avx512.mask.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask",
+    "llvm.x86.avx512.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+    "llvm.x86.avx512.mask.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask",
+    "llvm.x86.avx512.mask.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask",
+    "llvm.x86.avx512.mask.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask",
+    "llvm.x86.avx512.mask.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask",
+    "llvm.x86.avx512.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+    "llvm.x86.avx512.mask.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask",
+    "llvm.x86.avx512.mask.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask",
+    "llvm.x86.avx512.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+    "llvm.x86.avx512.mask.vfnmadd.pd.128" => "__builtin_ia32_vfnmaddpd128_mask",
+    "llvm.x86.avx512.mask.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256_mask",
+    "llvm.x86.avx512.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask",
+    "llvm.x86.avx512.mask.vfnmadd.ps.128" => "__builtin_ia32_vfnmaddps128_mask",
+    "llvm.x86.avx512.mask.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256_mask",
+    "llvm.x86.avx512.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask",
+    "llvm.x86.avx512.mask.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask",
+    "llvm.x86.avx512.mask.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask",
+    "llvm.x86.avx512.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask",
+    "llvm.x86.avx512.mask.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask",
+    "llvm.x86.avx512.mask.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask",
+    "llvm.x86.avx512.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask",
+    "llvm.x86.avx512.mask.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask",
+    "llvm.x86.avx512.mask.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask",
+    "llvm.x86.avx512.mask.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask",
+    "llvm.x86.avx512.mask.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128_mask",
+    "llvm.x86.avx512.mask.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256_mask",
+    "llvm.x86.avx512.mask.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512_mask",
+    "llvm.x86.avx512.mask.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask",
+    "llvm.x86.avx512.mask.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256_mask",
+    "llvm.x86.avx512.mask.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask",
+    "llvm.x86.avx512.mask.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask",
+    "llvm.x86.avx512.mask.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask",
+    "llvm.x86.avx512.mask.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask",
+    "llvm.x86.avx512.mask.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask",
+    "llvm.x86.avx512.mask.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask",
+    "llvm.x86.avx512.mask.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask",
+    "llvm.x86.avx512.mask.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128_mask",
+    "llvm.x86.avx512.mask.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256_mask",
+    "llvm.x86.avx512.mask.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512_mask",
+    "llvm.x86.avx512.mask.vpermilvar.pd.128" => "__builtin_ia32_vpermilvarpd_mask",
+    "llvm.x86.avx512.mask.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256_mask",
+    "llvm.x86.avx512.mask.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask",
+    "llvm.x86.avx512.mask.vpermilvar.ps.128" => "__builtin_ia32_vpermilvarps_mask",
+    "llvm.x86.avx512.mask.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256_mask",
+    "llvm.x86.avx512.mask.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask",
+    "llvm.x86.avx512.mask.vpermt.d.512" => "__builtin_ia32_vpermt2vard512_mask",
+    "llvm.x86.avx512.mask.vpermt.pd.512" => "__builtin_ia32_vpermt2varpd512_mask",
+    "llvm.x86.avx512.mask.vpermt.ps.512" => "__builtin_ia32_vpermt2varps512_mask",
+    "llvm.x86.avx512.mask.vpermt.q.512" => "__builtin_ia32_vpermt2varq512_mask",
+    "llvm.x86.avx512.mask.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_mask",
+    "llvm.x86.avx512.mask.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_mask",
+    "llvm.x86.avx512.mask.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_mask",
+    "llvm.x86.avx512.mask.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask",
+    "llvm.x86.avx512.mask.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask",
+    "llvm.x86.avx512.mask.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask",
+    "llvm.x86.avx512.mask.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_mask",
+    "llvm.x86.avx512.mask.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_mask",
+    "llvm.x86.avx512.mask.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_mask",
+    "llvm.x86.avx512.mask.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_mask",
+    "llvm.x86.avx512.mask.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_mask",
+    "llvm.x86.avx512.mask.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_mask",
+    "llvm.x86.avx512.mask.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_mask",
+    "llvm.x86.avx512.mask.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_mask",
+    "llvm.x86.avx512.mask.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_mask",
+    "llvm.x86.avx512.mask.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask",
+    "llvm.x86.avx512.mask.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask",
+    "llvm.x86.avx512.mask.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask",
+    "llvm.x86.avx512.mask.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask",
+    "llvm.x86.avx512.mask.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask",
+    "llvm.x86.avx512.mask.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask",
+    "llvm.x86.avx512.mask.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_mask",
+    "llvm.x86.avx512.mask.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask",
+    "llvm.x86.avx512.mask.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask",
+    "llvm.x86.avx512.mask.xor.pd.128" => "__builtin_ia32_xorpd128_mask",
+    "llvm.x86.avx512.mask.xor.pd.256" => "__builtin_ia32_xorpd256_mask",
+    "llvm.x86.avx512.mask.xor.pd.512" => "__builtin_ia32_xorpd512_mask",
+    "llvm.x86.avx512.mask.xor.ps.128" => "__builtin_ia32_xorps128_mask",
+    "llvm.x86.avx512.mask.xor.ps.256" => "__builtin_ia32_xorps256_mask",
+    "llvm.x86.avx512.mask.xor.ps.512" => "__builtin_ia32_xorps512_mask",
+    "llvm.x86.avx512.mask3.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask3",
+    "llvm.x86.avx512.mask3.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask3",
+    "llvm.x86.avx512.mask3.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask3",
+    "llvm.x86.avx512.mask3.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask3",
+    "llvm.x86.avx512.mask3.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask3",
+    "llvm.x86.avx512.mask3.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask3",
+    "llvm.x86.avx512.mask3.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask3",
+    "llvm.x86.avx512.mask3.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask3",
+    "llvm.x86.avx512.mask3.vfmsub.pd.128" => "__builtin_ia32_vfmsubpd128_mask3",
+    "llvm.x86.avx512.mask3.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256_mask3",
+    "llvm.x86.avx512.mask3.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask3",
+    "llvm.x86.avx512.mask3.vfmsub.ps.128" => "__builtin_ia32_vfmsubps128_mask3",
+    "llvm.x86.avx512.mask3.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256_mask3",
+    "llvm.x86.avx512.mask3.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask3",
+    "llvm.x86.avx512.mask3.vfmsubadd.pd.128" => "__builtin_ia32_vfmsubaddpd128_mask3",
+    "llvm.x86.avx512.mask3.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256_mask3",
+    "llvm.x86.avx512.mask3.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask3",
+    "llvm.x86.avx512.mask3.vfmsubadd.ps.128" => "__builtin_ia32_vfmsubaddps128_mask3",
+    "llvm.x86.avx512.mask3.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256_mask3",
+    "llvm.x86.avx512.mask3.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask3",
+    "llvm.x86.avx512.mask3.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask3",
+    "llvm.x86.avx512.mask3.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask3",
+    "llvm.x86.avx512.mask3.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask3",
+    "llvm.x86.avx512.mask3.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask3",
+    "llvm.x86.avx512.mask3.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask3",
+    "llvm.x86.avx512.mask3.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask3",
+    "llvm.x86.avx512.maskz.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.sd" => "__builtin_ia32_fixupimmsd_maskz",
+    "llvm.x86.avx512.maskz.fixupimm.ss" => "__builtin_ia32_fixupimmss_maskz",
+    "llvm.x86.avx512.maskz.pternlog.d.128" => "__builtin_ia32_pternlogd128_maskz",
+    "llvm.x86.avx512.maskz.pternlog.d.256" => "__builtin_ia32_pternlogd256_maskz",
+    "llvm.x86.avx512.maskz.pternlog.d.512" => "__builtin_ia32_pternlogd512_maskz",
+    "llvm.x86.avx512.maskz.pternlog.q.128" => "__builtin_ia32_pternlogq128_maskz",
+    "llvm.x86.avx512.maskz.pternlog.q.256" => "__builtin_ia32_pternlogq256_maskz",
+    "llvm.x86.avx512.maskz.pternlog.q.512" => "__builtin_ia32_pternlogq512_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_maskz",
+    "llvm.x86.avx512.maskz.vfmadd.ss" => "__builtin_ia32_vfmaddss3_maskz",
+    "llvm.x86.avx512.maskz.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_maskz",
+    "llvm.x86.avx512.maskz.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_maskz",
+    "llvm.x86.avx512.maskz.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_maskz",
+    "llvm.x86.avx512.maskz.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_maskz",
+    "llvm.x86.avx512.maskz.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_maskz",
+    "llvm.x86.avx512.maskz.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_maskz",
+    "llvm.x86.avx512.maskz.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_maskz",
+    "llvm.x86.avx512.maskz.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_maskz",
+    "llvm.x86.avx512.maskz.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_maskz",
+    "llvm.x86.avx512.maskz.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_maskz",
+    "llvm.x86.avx512.maskz.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_maskz",
+    "llvm.x86.avx512.maskz.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_maskz",
+    "llvm.x86.avx512.maskz.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_maskz",
+    "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512",
+    "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512",
+    "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512",
+    "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512",
+    "llvm.x86.avx512.movntdqa" => "__builtin_ia32_movntdqa512",
+    "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512",
+    "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512",
+    "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512",
+    "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512",
+    "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512",
+    "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512",
+    "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512",
+    "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512",
+    "llvm.x86.avx512.pbroadcastd.512" => "__builtin_ia32_pbroadcastd512",
+    "llvm.x86.avx512.pbroadcastq.512" => "__builtin_ia32_pbroadcastq512",
+    "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256",
+    "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512",
+    "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256",
+    "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512",
+    "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128",
+    "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256",
+    "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512",
+    "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128",
+    "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256",
+    "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512",
+    "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512",
+    "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512",
+    "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512",
+    "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512",
+    "llvm.x86.avx512.pmovzxbd" => "__builtin_ia32_pmovzxbd512",
+    "llvm.x86.avx512.pmovzxbq" => "__builtin_ia32_pmovzxbq512",
+    "llvm.x86.avx512.pmovzxdq" => "__builtin_ia32_pmovzxdq512",
+    "llvm.x86.avx512.pmovzxwd" => "__builtin_ia32_pmovzxwd512",
+    "llvm.x86.avx512.pmovzxwq" => "__builtin_ia32_pmovzxwq512",
+    "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512",
+    "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512",
+    "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512",
+    "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128",
+    "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256",
+    "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512",
+    "llvm.x86.avx512.psad.bw.512" => "__builtin_ia32_psadbw512",
+    "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512",
+    "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512",
+    "llvm.x86.avx512.psll.dq" => "__builtin_ia32_pslldqi512",
+    "llvm.x86.avx512.psll.dq.bs" => "__builtin_ia32_pslldqi512_byteshift",
+    "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512",
+    "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512",
+    "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512",
+    "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512",
+    "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512",
+    "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si",
+    "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di",
+    "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi",
+    "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi",
+    "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi",
+    "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512",
+    "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128",
+    "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256",
+    "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512",
+    "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512",
+    "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512",
+    "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128",
+    "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256",
+    "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512",
+    "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512",
+    "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si",
+    "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128",
+    "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256",
+    "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di",
+    "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi",
+    "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi",
+    "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi",
+    "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512",
+    "llvm.x86.avx512.psrl.dq" => "__builtin_ia32_psrldqi512",
+    "llvm.x86.avx512.psrl.dq.bs" => "__builtin_ia32_psrldqi512_byteshift",
+    "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512",
+    "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512",
+    "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512",
+    "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512",
+    "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512",
+    "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si",
+    "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di",
+    "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi",
+    "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi",
+    "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi",
+    "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128",
+    "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256",
+    "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512",
+    "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128",
+    "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256",
+    "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512",
+    "llvm.x86.avx512.ptestm.b.128" => "__builtin_ia32_ptestmb128",
+    "llvm.x86.avx512.ptestm.b.256" => "__builtin_ia32_ptestmb256",
+    "llvm.x86.avx512.ptestm.b.512" => "__builtin_ia32_ptestmb512",
+    "llvm.x86.avx512.ptestm.d.128" => "__builtin_ia32_ptestmd128",
+    "llvm.x86.avx512.ptestm.d.256" => "__builtin_ia32_ptestmd256",
+    "llvm.x86.avx512.ptestm.d.512" => "__builtin_ia32_ptestmd512",
+    "llvm.x86.avx512.ptestm.q.128" => "__builtin_ia32_ptestmq128",
+    "llvm.x86.avx512.ptestm.q.256" => "__builtin_ia32_ptestmq256",
+    "llvm.x86.avx512.ptestm.q.512" => "__builtin_ia32_ptestmq512",
+    "llvm.x86.avx512.ptestm.w.128" => "__builtin_ia32_ptestmw128",
+    "llvm.x86.avx512.ptestm.w.256" => "__builtin_ia32_ptestmw256",
+    "llvm.x86.avx512.ptestm.w.512" => "__builtin_ia32_ptestmw512",
+    "llvm.x86.avx512.ptestnm.b.128" => "__builtin_ia32_ptestnmb128",
+    "llvm.x86.avx512.ptestnm.b.256" => "__builtin_ia32_ptestnmb256",
+    "llvm.x86.avx512.ptestnm.b.512" => "__builtin_ia32_ptestnmb512",
+    "llvm.x86.avx512.ptestnm.d.128" => "__builtin_ia32_ptestnmd128",
+    "llvm.x86.avx512.ptestnm.d.256" => "__builtin_ia32_ptestnmd256",
+    "llvm.x86.avx512.ptestnm.d.512" => "__builtin_ia32_ptestnmd512",
+    "llvm.x86.avx512.ptestnm.q.128" => "__builtin_ia32_ptestnmq128",
+    "llvm.x86.avx512.ptestnm.q.256" => "__builtin_ia32_ptestnmq256",
+    "llvm.x86.avx512.ptestnm.q.512" => "__builtin_ia32_ptestnmq512",
+    "llvm.x86.avx512.ptestnm.w.128" => "__builtin_ia32_ptestnmw128",
+    "llvm.x86.avx512.ptestnm.w.256" => "__builtin_ia32_ptestnmw256",
+    "llvm.x86.avx512.ptestnm.w.512" => "__builtin_ia32_ptestnmw512",
+    "llvm.x86.avx512.rcp14.pd.128" => "__builtin_ia32_rcp14pd128_mask",
+    "llvm.x86.avx512.rcp14.pd.256" => "__builtin_ia32_rcp14pd256_mask",
+    "llvm.x86.avx512.rcp14.pd.512" => "__builtin_ia32_rcp14pd512_mask",
+    "llvm.x86.avx512.rcp14.ps.128" => "__builtin_ia32_rcp14ps128_mask",
+    "llvm.x86.avx512.rcp14.ps.256" => "__builtin_ia32_rcp14ps256_mask",
+    "llvm.x86.avx512.rcp14.ps.512" => "__builtin_ia32_rcp14ps512_mask",
+    "llvm.x86.avx512.rcp14.sd" => "__builtin_ia32_rcp14sd_mask",
+    "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask",
+    "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask",
+    "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask",
+    "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask",
+    // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask",
+    "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask",
+    // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask",
+    "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd",
+    "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless",
+    "llvm.x86.avx512.rsqrt14.pd.128" => "__builtin_ia32_rsqrt14pd128_mask",
+    "llvm.x86.avx512.rsqrt14.pd.256" => "__builtin_ia32_rsqrt14pd256_mask",
+    "llvm.x86.avx512.rsqrt14.pd.512" => "__builtin_ia32_rsqrt14pd512_mask",
+    "llvm.x86.avx512.rsqrt14.ps.128" => "__builtin_ia32_rsqrt14ps128_mask",
+    "llvm.x86.avx512.rsqrt14.ps.256" => "__builtin_ia32_rsqrt14ps256_mask",
+    "llvm.x86.avx512.rsqrt14.ps.512" => "__builtin_ia32_rsqrt14ps512_mask",
+    "llvm.x86.avx512.rsqrt14.sd" => "__builtin_ia32_rsqrt14sd_mask",
+    "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask",
+    "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask",
+    "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask",
+    "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask",
+    // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask",
+    "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask",
+    // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask",
+    "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df",
+    "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si",
+    "llvm.x86.avx512.scatter.dpq.512" => "__builtin_ia32_scattersiv8di",
+    "llvm.x86.avx512.scatter.dps.512" => "__builtin_ia32_scattersiv16sf",
+    "llvm.x86.avx512.scatter.qpd.512" => "__builtin_ia32_scatterdiv8df",
+    "llvm.x86.avx512.scatter.qpi.512" => "__builtin_ia32_scatterdiv16si",
+    "llvm.x86.avx512.scatter.qpq.512" => "__builtin_ia32_scatterdiv8di",
+    "llvm.x86.avx512.scatter.qps.512" => "__builtin_ia32_scatterdiv16sf",
+    "llvm.x86.avx512.scatterdiv2.df" => "__builtin_ia32_scatterdiv2df",
+    "llvm.x86.avx512.scatterdiv2.di" => "__builtin_ia32_scatterdiv2di",
+    "llvm.x86.avx512.scatterdiv4.df" => "__builtin_ia32_scatterdiv4df",
+    "llvm.x86.avx512.scatterdiv4.di" => "__builtin_ia32_scatterdiv4di",
+    "llvm.x86.avx512.scatterdiv4.sf" => "__builtin_ia32_scatterdiv4sf",
+    "llvm.x86.avx512.scatterdiv4.si" => "__builtin_ia32_scatterdiv4si",
+    "llvm.x86.avx512.scatterdiv8.sf" => "__builtin_ia32_scatterdiv8sf",
+    "llvm.x86.avx512.scatterdiv8.si" => "__builtin_ia32_scatterdiv8si",
+    "llvm.x86.avx512.scatterpf.dpd.512" => "__builtin_ia32_scatterpfdpd",
+    "llvm.x86.avx512.scatterpf.dps.512" => "__builtin_ia32_scatterpfdps",
+    "llvm.x86.avx512.scatterpf.qpd.512" => "__builtin_ia32_scatterpfqpd",
+    "llvm.x86.avx512.scatterpf.qps.512" => "__builtin_ia32_scatterpfqps",
+    "llvm.x86.avx512.scattersiv2.df" => "__builtin_ia32_scattersiv2df",
+    "llvm.x86.avx512.scattersiv2.di" => "__builtin_ia32_scattersiv2di",
+    "llvm.x86.avx512.scattersiv4.df" => "__builtin_ia32_scattersiv4df",
+    "llvm.x86.avx512.scattersiv4.di" => "__builtin_ia32_scattersiv4di",
+    "llvm.x86.avx512.scattersiv4.sf" => "__builtin_ia32_scattersiv4sf",
+    "llvm.x86.avx512.scattersiv4.si" => "__builtin_ia32_scattersiv4si",
+    "llvm.x86.avx512.scattersiv8.sf" => "__builtin_ia32_scattersiv8sf",
+    "llvm.x86.avx512.scattersiv8.si" => "__builtin_ia32_scattersiv8si",
+    "llvm.x86.avx512.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask",
+    "llvm.x86.avx512.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask",
+    "llvm.x86.avx512.sqrt.sd" => "__builtin_ia32_sqrtrndsd",
+    "llvm.x86.avx512.sqrt.ss" => "__builtin_ia32_sqrtrndss",
+    "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512",
+    "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512",
+    "llvm.x86.avx512.vbroadcast.sd.512" => "__builtin_ia32_vbroadcastsd512",
+    "llvm.x86.avx512.vbroadcast.sd.pd.512" => "__builtin_ia32_vbroadcastsd_pd512",
+    "llvm.x86.avx512.vbroadcast.ss.512" => "__builtin_ia32_vbroadcastss512",
+    "llvm.x86.avx512.vbroadcast.ss.ps.512" => "__builtin_ia32_vbroadcastss_ps512",
+    "llvm.x86.avx512.vcomi.sd" => "__builtin_ia32_vcomisd",
+    "llvm.x86.avx512.vcomi.ss" => "__builtin_ia32_vcomiss",
+    "llvm.x86.avx512.vcvtsd2si32" => "__builtin_ia32_vcvtsd2si32",
+    "llvm.x86.avx512.vcvtsd2si64" => "__builtin_ia32_vcvtsd2si64",
+    "llvm.x86.avx512.vcvtsd2usi32" => "__builtin_ia32_vcvtsd2usi32",
+    "llvm.x86.avx512.vcvtsd2usi64" => "__builtin_ia32_vcvtsd2usi64",
+    "llvm.x86.avx512.vcvtss2si32" => "__builtin_ia32_vcvtss2si32",
+    "llvm.x86.avx512.vcvtss2si64" => "__builtin_ia32_vcvtss2si64",
+    "llvm.x86.avx512.vcvtss2usi32" => "__builtin_ia32_vcvtss2usi32",
+    "llvm.x86.avx512.vcvtss2usi64" => "__builtin_ia32_vcvtss2usi64",
+    "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd128",
+    "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd256",
+    "llvm.x86.avx512.vpdpbusd.512" => "__builtin_ia32_vpdpbusd512",
+    "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds128",
+    "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds256",
+    "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds512",
+    "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd128",
+    "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd256",
+    "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd512",
+    "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds128",
+    "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds256",
+    "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds512",
+    "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128",
+    "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256",
+    "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512",
+    "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128",
+    "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256",
+    "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512",
+    "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128",
+    "llvm.x86.avx512.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256",
+    "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512",
+    "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128",
+    "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256",
+    "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512",
+    "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128",
+    "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256",
+    "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512",
+    "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128",
+    "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256",
+    "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512",
+    "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512",
+    "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512",
+    "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128",
+    "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256",
+    "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512",
+    "llvm.x86.avx512.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128",
+    "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256",
+    "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512",
+    "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_128",
+    "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_256",
+    "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_512",
+    "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_256",
+    "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_512",
+    "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_128",
+    "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_256",
+    "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_512",
+    "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512",
+    "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask",
+    "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask",
+    "llvm.x86.avx512fp16.mask.fpclass.sh" => "__builtin_ia32_fpclasssh_mask",
+    "llvm.x86.avx512fp16.mask.getexp.ph.128" => "__builtin_ia32_getexpph128_mask",
+    "llvm.x86.avx512fp16.mask.getexp.ph.256" => "__builtin_ia32_getexpph256_mask",
+    "llvm.x86.avx512fp16.mask.getexp.ph.512" => "__builtin_ia32_getexpph512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask",
+    "llvm.x86.avx512fp16.mask.getmant.ph.128" => "__builtin_ia32_getmantph128_mask",
+    "llvm.x86.avx512fp16.mask.getmant.ph.256" => "__builtin_ia32_getmantph256_mask",
+    "llvm.x86.avx512fp16.mask.getmant.ph.512" => "__builtin_ia32_getmantph512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask",
+    "llvm.x86.avx512fp16.mask.rcp.ph.128" => "__builtin_ia32_rcpph128_mask",
+    "llvm.x86.avx512fp16.mask.rcp.ph.256" => "__builtin_ia32_rcpph256_mask",
+    "llvm.x86.avx512fp16.mask.rcp.ph.512" => "__builtin_ia32_rcpph512_mask",
+    "llvm.x86.avx512fp16.mask.rcp.sh" => "__builtin_ia32_rcpsh_mask",
+    "llvm.x86.avx512fp16.mask.reduce.ph.128" => "__builtin_ia32_reduceph128_mask",
+    "llvm.x86.avx512fp16.mask.reduce.ph.256" => "__builtin_ia32_reduceph256_mask",
+    "llvm.x86.avx512fp16.mask.reduce.ph.512" => "__builtin_ia32_reduceph512_mask",
+    "llvm.x86.avx512fp16.mask.reduce.sh" => "__builtin_ia32_reducesh_mask",
+    "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph_128_mask",
+    "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph_256_mask",
+    "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask",
+    "llvm.x86.avx512fp16.mask.rsqrt.ph.128" => "__builtin_ia32_rsqrtph128_mask",
+    "llvm.x86.avx512fp16.mask.rsqrt.ph.256" => "__builtin_ia32_rsqrtph256_mask",
+    "llvm.x86.avx512fp16.mask.rsqrt.ph.512" => "__builtin_ia32_rsqrtph512_mask",
+    "llvm.x86.avx512fp16.mask.rsqrt.sh" => "__builtin_ia32_rsqrtsh_mask",
+    "llvm.x86.avx512fp16.mask.scalef.ph.128" => "__builtin_ia32_scalefph128_mask",
+    "llvm.x86.avx512fp16.mask.scalef.ph.256" => "__builtin_ia32_scalefph256_mask",
+    "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask",
+    "llvm.x86.avx512fp16.mask.vcvtdq2ph.128" => "__builtin_ia32_vcvtdq2ph128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtpd2ph.128" => "__builtin_ia32_vcvtpd2ph128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtpd2ph.256" => "__builtin_ia32_vcvtpd2ph256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtpd2ph.512" => "__builtin_ia32_vcvtpd2ph512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2dq.128" => "__builtin_ia32_vcvtph2dq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2dq.256" => "__builtin_ia32_vcvtph2dq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2dq.512" => "__builtin_ia32_vcvtph2dq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2pd.128" => "__builtin_ia32_vcvtph2pd128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2pd.256" => "__builtin_ia32_vcvtph2pd256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2pd.512" => "__builtin_ia32_vcvtph2pd512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2psx.128" => "__builtin_ia32_vcvtph2psx128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2psx.256" => "__builtin_ia32_vcvtph2psx256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2psx.512" => "__builtin_ia32_vcvtph2psx512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2qq.128" => "__builtin_ia32_vcvtph2qq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2qq.256" => "__builtin_ia32_vcvtph2qq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2qq.512" => "__builtin_ia32_vcvtph2qq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2udq.128" => "__builtin_ia32_vcvtph2udq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2udq.256" => "__builtin_ia32_vcvtph2udq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2udq.512" => "__builtin_ia32_vcvtph2udq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2uqq.128" => "__builtin_ia32_vcvtph2uqq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2uqq.256" => "__builtin_ia32_vcvtph2uqq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2uqq.512" => "__builtin_ia32_vcvtph2uqq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2uw.128" => "__builtin_ia32_vcvtph2uw128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2uw.256" => "__builtin_ia32_vcvtph2uw256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2uw.512" => "__builtin_ia32_vcvtph2uw512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2w.128" => "__builtin_ia32_vcvtph2w128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2w.256" => "__builtin_ia32_vcvtph2w256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtph2w.512" => "__builtin_ia32_vcvtph2w512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtps2phx.128" => "__builtin_ia32_vcvtps2phx128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtps2phx.256" => "__builtin_ia32_vcvtps2phx256_mask",
+    "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtqq2ph.128" => "__builtin_ia32_vcvtqq2ph128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtqq2ph.256" => "__builtin_ia32_vcvtqq2ph256_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask",
+    // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2dq.128" => "__builtin_ia32_vcvttph2dq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2dq.256" => "__builtin_ia32_vcvttph2dq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2qq.128" => "__builtin_ia32_vcvttph2qq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2qq.256" => "__builtin_ia32_vcvttph2qq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2qq.512" => "__builtin_ia32_vcvttph2qq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2udq.128" => "__builtin_ia32_vcvttph2udq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2udq.256" => "__builtin_ia32_vcvttph2udq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2udq.512" => "__builtin_ia32_vcvttph2udq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2uqq.128" => "__builtin_ia32_vcvttph2uqq128_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2uqq.256" => "__builtin_ia32_vcvttph2uqq256_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2uqq.512" => "__builtin_ia32_vcvttph2uqq512_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2uw.128" => "__builtin_ia32_vcvttph2uw128_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2uw.256" => "__builtin_ia32_vcvttph2uw256_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2uw.512" => "__builtin_ia32_vcvttph2uw512_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2w.128" => "__builtin_ia32_vcvttph2w128_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2w.256" => "__builtin_ia32_vcvttph2w256_mask",
+    "llvm.x86.avx512fp16.mask.vcvttph2w.512" => "__builtin_ia32_vcvttph2w512_mask",
+    "llvm.x86.avx512fp16.mask.vcvtudq2ph.128" => "__builtin_ia32_vcvtudq2ph128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128" => "__builtin_ia32_vcvtuqq2ph128_mask",
+    "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256" => "__builtin_ia32_vcvtuqq2ph256_mask",
+    "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask",
+    "llvm.x86.avx512fp16.mask.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_mask",
+    "llvm.x86.avx512fp16.mask.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_mask3",
+    "llvm.x86.avx512fp16.mask.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_mask",
+    "llvm.x86.avx512fp16.mask.vfcmul.cph.128" => "__builtin_ia32_vfcmulcph128_mask",
+    "llvm.x86.avx512fp16.mask.vfcmul.cph.256" => "__builtin_ia32_vfcmulcph256_mask",
+    "llvm.x86.avx512fp16.mask.vfcmul.cph.512" => "__builtin_ia32_vfcmulcph512_mask",
+    "llvm.x86.avx512fp16.mask.vfcmul.csh" => "__builtin_ia32_vfcmulcsh_mask",
+    "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask",
+    "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask",
+    "llvm.x86.avx512fp16.mask.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_mask3",
+    "llvm.x86.avx512fp16.mask.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_mask",
+    "llvm.x86.avx512fp16.mask.vfmul.cph.128" => "__builtin_ia32_vfmulcph128_mask",
+    "llvm.x86.avx512fp16.mask.vfmul.cph.256" => "__builtin_ia32_vfmulcph256_mask",
+    "llvm.x86.avx512fp16.mask.vfmul.cph.512" => "__builtin_ia32_vfmulcph512_mask",
+    "llvm.x86.avx512fp16.mask.vfmul.csh" => "__builtin_ia32_vfmulcsh_mask",
+    "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_maskz",
+    "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_maskz",
+    "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_maskz",
+    "llvm.x86.avx512fp16.maskz.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_maskz",
+    "llvm.x86.avx512fp16.maskz.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_maskz",
+    "llvm.x86.avx512fp16.maskz.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_maskz",
+    "llvm.x86.avx512fp16.maskz.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_maskz",
+    "llvm.x86.avx512fp16.maskz.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_maskz",
+    "llvm.x86.avx512fp16.max.ph.128" => "__builtin_ia32_maxph128",
+    "llvm.x86.avx512fp16.max.ph.256" => "__builtin_ia32_maxph256",
+    "llvm.x86.avx512fp16.max.ph.512" => "__builtin_ia32_maxph512",
+    "llvm.x86.avx512fp16.min.ph.128" => "__builtin_ia32_minph128",
+    "llvm.x86.avx512fp16.min.ph.256" => "__builtin_ia32_minph256",
+    "llvm.x86.avx512fp16.min.ph.512" => "__builtin_ia32_minph512",
+    "llvm.x86.avx512fp16.mul.ph.512" => "__builtin_ia32_mulph512",
+    "llvm.x86.avx512fp16.sub.ph.512" => "__builtin_ia32_subph512",
+    "llvm.x86.avx512fp16.vcomi.sh" => "__builtin_ia32_vcomish",
+    "llvm.x86.avx512fp16.vcvtsh2si32" => "__builtin_ia32_vcvtsh2si32",
+    "llvm.x86.avx512fp16.vcvtsh2si64" => "__builtin_ia32_vcvtsh2si64",
+    "llvm.x86.avx512fp16.vcvtsh2usi32" => "__builtin_ia32_vcvtsh2usi32",
+    "llvm.x86.avx512fp16.vcvtsh2usi64" => "__builtin_ia32_vcvtsh2usi64",
+    "llvm.x86.avx512fp16.vcvtsi2sh" => "__builtin_ia32_vcvtsi2sh",
+    "llvm.x86.avx512fp16.vcvtsi642sh" => "__builtin_ia32_vcvtsi642sh",
+    "llvm.x86.avx512fp16.vcvttsh2si32" => "__builtin_ia32_vcvttsh2si32",
+    "llvm.x86.avx512fp16.vcvttsh2si64" => "__builtin_ia32_vcvttsh2si64",
+    "llvm.x86.avx512fp16.vcvttsh2usi32" => "__builtin_ia32_vcvttsh2usi32",
+    "llvm.x86.avx512fp16.vcvttsh2usi64" => "__builtin_ia32_vcvttsh2usi64",
+    "llvm.x86.avx512fp16.vcvtusi2sh" => "__builtin_ia32_vcvtusi2sh",
+    "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi642sh",
+    "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph",
+    "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256",
+    "llvm.x86.axor32" => "__builtin_ia32_axor32",
+    "llvm.x86.axor64" => "__builtin_ia32_axor64",
+    "llvm.x86.bmi.bextr.32" => "__builtin_ia32_bextr_u32",
+    "llvm.x86.bmi.bextr.64" => "__builtin_ia32_bextr_u64",
+    "llvm.x86.bmi.bzhi.32" => "__builtin_ia32_bzhi_si",
+    "llvm.x86.bmi.bzhi.64" => "__builtin_ia32_bzhi_di",
+    "llvm.x86.bmi.pdep.32" => "__builtin_ia32_pdep_si",
+    "llvm.x86.bmi.pdep.64" => "__builtin_ia32_pdep_di",
+    "llvm.x86.bmi.pext.32" => "__builtin_ia32_pext_si",
+    "llvm.x86.bmi.pext.64" => "__builtin_ia32_pext_di",
+    "llvm.x86.cldemote" => "__builtin_ia32_cldemote",
+    "llvm.x86.clflushopt" => "__builtin_ia32_clflushopt",
+    "llvm.x86.clrssbsy" => "__builtin_ia32_clrssbsy",
+    "llvm.x86.clui" => "__builtin_ia32_clui",
+    "llvm.x86.clwb" => "__builtin_ia32_clwb",
+    "llvm.x86.clzero" => "__builtin_ia32_clzero",
+    "llvm.x86.cmpccxadd32" => "__builtin_ia32_cmpccxadd32",
+    "llvm.x86.cmpccxadd64" => "__builtin_ia32_cmpccxadd64",
+    "llvm.x86.directstore32" => "__builtin_ia32_directstore_u32",
+    "llvm.x86.directstore64" => "__builtin_ia32_directstore_u64",
+    "llvm.x86.enqcmd" => "__builtin_ia32_enqcmd",
+    "llvm.x86.enqcmds" => "__builtin_ia32_enqcmds",
+    "llvm.x86.flags.read.u32" => "__builtin_ia32_readeflags_u32",
+    "llvm.x86.flags.read.u64" => "__builtin_ia32_readeflags_u64",
+    "llvm.x86.flags.write.u32" => "__builtin_ia32_writeeflags_u32",
+    "llvm.x86.flags.write.u64" => "__builtin_ia32_writeeflags_u64",
+    "llvm.x86.fma.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+    "llvm.x86.fma.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+    "llvm.x86.fma.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+    "llvm.x86.fma.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+    "llvm.x86.fma.mask.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask",
+    "llvm.x86.fma.mask.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask",
+    "llvm.x86.fma.mask.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask",
+    "llvm.x86.fma.mask.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask",
+    "llvm.x86.fma.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask",
+    "llvm.x86.fma.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask",
+    "llvm.x86.fma.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask",
+    "llvm.x86.fma.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask",
+    "llvm.x86.fma.vfmadd.pd" => "__builtin_ia32_vfmaddpd",
+    "llvm.x86.fma.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256",
+    "llvm.x86.fma.vfmadd.ps" => "__builtin_ia32_vfmaddps",
+    "llvm.x86.fma.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256",
+    "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd",
+    "llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss",
+    "llvm.x86.fma.vfmaddsub.pd" => "__builtin_ia32_vfmaddsubpd",
+    "llvm.x86.fma.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256",
+    "llvm.x86.fma.vfmaddsub.ps" => "__builtin_ia32_vfmaddsubps",
+    "llvm.x86.fma.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256",
+    "llvm.x86.fma.vfmsub.pd" => "__builtin_ia32_vfmsubpd",
+    "llvm.x86.fma.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256",
+    "llvm.x86.fma.vfmsub.ps" => "__builtin_ia32_vfmsubps",
+    "llvm.x86.fma.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256",
+    "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd",
+    "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss",
+    "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmsubaddpd",
+    "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256",
+    "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmsubaddps",
+    "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256",
+    "llvm.x86.fma.vfnmadd.pd" => "__builtin_ia32_vfnmaddpd",
+    "llvm.x86.fma.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256",
+    "llvm.x86.fma.vfnmadd.ps" => "__builtin_ia32_vfnmaddps",
+    "llvm.x86.fma.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256",
+    "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd",
+    "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss",
+    "llvm.x86.fma.vfnmsub.pd" => "__builtin_ia32_vfnmsubpd",
+    "llvm.x86.fma.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256",
+    "llvm.x86.fma.vfnmsub.ps" => "__builtin_ia32_vfnmsubps",
+    "llvm.x86.fma.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256",
+    "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd",
+    "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss",
+    "llvm.x86.fxrstor" => "__builtin_ia32_fxrstor",
+    "llvm.x86.fxrstor64" => "__builtin_ia32_fxrstor64",
+    "llvm.x86.fxsave" => "__builtin_ia32_fxsave",
+    "llvm.x86.fxsave64" => "__builtin_ia32_fxsave64",
+    "llvm.x86.incsspd" => "__builtin_ia32_incsspd",
+    "llvm.x86.incsspq" => "__builtin_ia32_incsspq",
+    "llvm.x86.invpcid" => "__builtin_ia32_invpcid",
+    "llvm.x86.ldtilecfg" => "__builtin_ia32_tile_loadconfig",
+    "llvm.x86.ldtilecfg.internal" => "__builtin_ia32_tile_loadconfig_internal",
+    "llvm.x86.llwpcb" => "__builtin_ia32_llwpcb",
+    "llvm.x86.loadiwkey" => "__builtin_ia32_loadiwkey",
+    "llvm.x86.lwpins32" => "__builtin_ia32_lwpins32",
+    "llvm.x86.lwpins64" => "__builtin_ia32_lwpins64",
+    "llvm.x86.lwpval32" => "__builtin_ia32_lwpval32",
+    "llvm.x86.lwpval64" => "__builtin_ia32_lwpval64",
+    "llvm.x86.mmx.emms" => "__builtin_ia32_emms",
+    "llvm.x86.mmx.femms" => "__builtin_ia32_femms",
+    "llvm.x86.monitorx" => "__builtin_ia32_monitorx",
+    "llvm.x86.movdir64b" => "__builtin_ia32_movdir64b",
+    "llvm.x86.mwaitx" => "__builtin_ia32_mwaitx",
+    "llvm.x86.pclmulqdq" => "__builtin_ia32_pclmulqdq128",
+    "llvm.x86.pclmulqdq.256" => "__builtin_ia32_pclmulqdq256",
+    "llvm.x86.pclmulqdq.512" => "__builtin_ia32_pclmulqdq512",
+    "llvm.x86.ptwrite32" => "__builtin_ia32_ptwrite32",
+    "llvm.x86.ptwrite64" => "__builtin_ia32_ptwrite64",
+    "llvm.x86.rdfsbase.32" => "__builtin_ia32_rdfsbase32",
+    "llvm.x86.rdfsbase.64" => "__builtin_ia32_rdfsbase64",
+    "llvm.x86.rdgsbase.32" => "__builtin_ia32_rdgsbase32",
+    "llvm.x86.rdgsbase.64" => "__builtin_ia32_rdgsbase64",
+    "llvm.x86.rdpid" => "__builtin_ia32_rdpid",
+    "llvm.x86.rdpkru" => "__builtin_ia32_rdpkru",
+    "llvm.x86.rdpmc" => "__builtin_ia32_rdpmc",
+    "llvm.x86.rdpru" => "__builtin_ia32_rdpru",
+    "llvm.x86.rdsspd" => "__builtin_ia32_rdsspd",
+    "llvm.x86.rdsspq" => "__builtin_ia32_rdsspq",
+    "llvm.x86.rdtsc" => "__builtin_ia32_rdtsc",
+    "llvm.x86.rdtscp" => "__builtin_ia32_rdtscp",
+    "llvm.x86.rstorssp" => "__builtin_ia32_rstorssp",
+    "llvm.x86.saveprevssp" => "__builtin_ia32_saveprevssp",
+    "llvm.x86.senduipi" => "__builtin_ia32_senduipi",
+    "llvm.x86.serialize" => "__builtin_ia32_serialize",
+    "llvm.x86.setssbsy" => "__builtin_ia32_setssbsy",
+    "llvm.x86.sha1msg1" => "__builtin_ia32_sha1msg1",
+    "llvm.x86.sha1msg2" => "__builtin_ia32_sha1msg2",
+    "llvm.x86.sha1nexte" => "__builtin_ia32_sha1nexte",
+    "llvm.x86.sha1rnds4" => "__builtin_ia32_sha1rnds4",
+    "llvm.x86.sha256msg1" => "__builtin_ia32_sha256msg1",
+    "llvm.x86.sha256msg2" => "__builtin_ia32_sha256msg2",
+    "llvm.x86.sha256rnds2" => "__builtin_ia32_sha256rnds2",
+    "llvm.x86.slwpcb" => "__builtin_ia32_slwpcb",
+    "llvm.x86.sse.add.ss" => "__builtin_ia32_addss",
+    "llvm.x86.sse.cmp.ps" => "__builtin_ia32_cmpps",
+    "llvm.x86.sse.cmp.ss" => "__builtin_ia32_cmpss",
+    "llvm.x86.sse.comieq.ss" => "__builtin_ia32_comieq",
+    "llvm.x86.sse.comige.ss" => "__builtin_ia32_comige",
+    "llvm.x86.sse.comigt.ss" => "__builtin_ia32_comigt",
+    "llvm.x86.sse.comile.ss" => "__builtin_ia32_comile",
+    "llvm.x86.sse.comilt.ss" => "__builtin_ia32_comilt",
+    "llvm.x86.sse.comineq.ss" => "__builtin_ia32_comineq",
+    "llvm.x86.sse.cvtsi2ss" => "__builtin_ia32_cvtsi2ss",
+    "llvm.x86.sse.cvtsi642ss" => "__builtin_ia32_cvtsi642ss",
+    "llvm.x86.sse.cvtss2si" => "__builtin_ia32_cvtss2si",
+    "llvm.x86.sse.cvtss2si64" => "__builtin_ia32_cvtss2si64",
+    "llvm.x86.sse.cvttss2si" => "__builtin_ia32_cvttss2si",
+    "llvm.x86.sse.cvttss2si64" => "__builtin_ia32_cvttss2si64",
+    "llvm.x86.sse.div.ss" => "__builtin_ia32_divss",
+    "llvm.x86.sse.max.ps" => "__builtin_ia32_maxps",
+    "llvm.x86.sse.max.ss" => "__builtin_ia32_maxss",
+    "llvm.x86.sse.min.ps" => "__builtin_ia32_minps",
+    "llvm.x86.sse.min.ss" => "__builtin_ia32_minss",
+    "llvm.x86.sse.movmsk.ps" => "__builtin_ia32_movmskps",
+    "llvm.x86.sse.mul.ss" => "__builtin_ia32_mulss",
+    "llvm.x86.sse.rcp.ps" => "__builtin_ia32_rcpps",
+    "llvm.x86.sse.rcp.ss" => "__builtin_ia32_rcpss",
+    "llvm.x86.sse.rsqrt.ps" => "__builtin_ia32_rsqrtps",
+    "llvm.x86.sse.rsqrt.ss" => "__builtin_ia32_rsqrtss",
+    "llvm.x86.sse.sfence" => "__builtin_ia32_sfence",
+    "llvm.x86.sse.sqrt.ps" => "__builtin_ia32_sqrtps",
+    "llvm.x86.sse.sqrt.ss" => "__builtin_ia32_sqrtss",
+    "llvm.x86.sse.storeu.ps" => "__builtin_ia32_storeups",
+    "llvm.x86.sse.sub.ss" => "__builtin_ia32_subss",
+    "llvm.x86.sse.ucomieq.ss" => "__builtin_ia32_ucomieq",
+    "llvm.x86.sse.ucomige.ss" => "__builtin_ia32_ucomige",
+    "llvm.x86.sse.ucomigt.ss" => "__builtin_ia32_ucomigt",
+    "llvm.x86.sse.ucomile.ss" => "__builtin_ia32_ucomile",
+    "llvm.x86.sse.ucomilt.ss" => "__builtin_ia32_ucomilt",
+    "llvm.x86.sse.ucomineq.ss" => "__builtin_ia32_ucomineq",
+    "llvm.x86.sse2.add.sd" => "__builtin_ia32_addsd",
+    "llvm.x86.sse2.clflush" => "__builtin_ia32_clflush",
+    "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
+    "llvm.x86.sse2.cmp.sd" => "__builtin_ia32_cmpsd",
+    "llvm.x86.sse2.comieq.sd" => "__builtin_ia32_comisdeq",
+    "llvm.x86.sse2.comige.sd" => "__builtin_ia32_comisdge",
+    "llvm.x86.sse2.comigt.sd" => "__builtin_ia32_comisdgt",
+    "llvm.x86.sse2.comile.sd" => "__builtin_ia32_comisdle",
+    "llvm.x86.sse2.comilt.sd" => "__builtin_ia32_comisdlt",
+    "llvm.x86.sse2.comineq.sd" => "__builtin_ia32_comisdneq",
+    "llvm.x86.sse2.cvtdq2pd" => "__builtin_ia32_cvtdq2pd",
+    "llvm.x86.sse2.cvtdq2ps" => "__builtin_ia32_cvtdq2ps",
+    "llvm.x86.sse2.cvtpd2dq" => "__builtin_ia32_cvtpd2dq",
+    "llvm.x86.sse2.cvtpd2ps" => "__builtin_ia32_cvtpd2ps",
+    "llvm.x86.sse2.cvtps2dq" => "__builtin_ia32_cvtps2dq",
+    "llvm.x86.sse2.cvtps2pd" => "__builtin_ia32_cvtps2pd",
+    "llvm.x86.sse2.cvtsd2si" => "__builtin_ia32_cvtsd2si",
+    "llvm.x86.sse2.cvtsd2si64" => "__builtin_ia32_cvtsd2si64",
+    "llvm.x86.sse2.cvtsd2ss" => "__builtin_ia32_cvtsd2ss",
+    "llvm.x86.sse2.cvtsi2sd" => "__builtin_ia32_cvtsi2sd",
+    "llvm.x86.sse2.cvtsi642sd" => "__builtin_ia32_cvtsi642sd",
+    "llvm.x86.sse2.cvtss2sd" => "__builtin_ia32_cvtss2sd",
+    "llvm.x86.sse2.cvttpd2dq" => "__builtin_ia32_cvttpd2dq",
+    "llvm.x86.sse2.cvttps2dq" => "__builtin_ia32_cvttps2dq",
+    "llvm.x86.sse2.cvttsd2si" => "__builtin_ia32_cvttsd2si",
+    "llvm.x86.sse2.cvttsd2si64" => "__builtin_ia32_cvttsd2si64",
+    "llvm.x86.sse2.div.sd" => "__builtin_ia32_divsd",
+    "llvm.x86.sse2.lfence" => "__builtin_ia32_lfence",
+    "llvm.x86.sse2.maskmov.dqu" => "__builtin_ia32_maskmovdqu",
+    "llvm.x86.sse2.max.pd" => "__builtin_ia32_maxpd",
+    "llvm.x86.sse2.max.sd" => "__builtin_ia32_maxsd",
+    "llvm.x86.sse2.mfence" => "__builtin_ia32_mfence",
+    "llvm.x86.sse2.min.pd" => "__builtin_ia32_minpd",
+    "llvm.x86.sse2.min.sd" => "__builtin_ia32_minsd",
+    "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
+    "llvm.x86.sse2.mul.sd" => "__builtin_ia32_mulsd",
+    "llvm.x86.sse2.packssdw.128" => "__builtin_ia32_packssdw128",
+    "llvm.x86.sse2.packsswb.128" => "__builtin_ia32_packsswb128",
+    "llvm.x86.sse2.packuswb.128" => "__builtin_ia32_packuswb128",
+    "llvm.x86.sse2.padds.b" => "__builtin_ia32_paddsb128",
+    "llvm.x86.sse2.padds.w" => "__builtin_ia32_paddsw128",
+    "llvm.x86.sse2.paddus.b" => "__builtin_ia32_paddusb128",
+    "llvm.x86.sse2.paddus.w" => "__builtin_ia32_paddusw128",
+    "llvm.x86.sse2.pause" => "__builtin_ia32_pause",
+    "llvm.x86.sse2.pavg.b" => "__builtin_ia32_pavgb128",
+    "llvm.x86.sse2.pavg.w" => "__builtin_ia32_pavgw128",
+    "llvm.x86.sse2.pmadd.wd" => "__builtin_ia32_pmaddwd128",
+    "llvm.x86.sse2.pmaxs.w" => "__builtin_ia32_pmaxsw128",
+    "llvm.x86.sse2.pmaxu.b" => "__builtin_ia32_pmaxub128",
+    "llvm.x86.sse2.pmins.w" => "__builtin_ia32_pminsw128",
+    "llvm.x86.sse2.pminu.b" => "__builtin_ia32_pminub128",
+    "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
+    "llvm.x86.sse2.pmulh.w" => "__builtin_ia32_pmulhw128",
+    "llvm.x86.sse2.pmulhu.w" => "__builtin_ia32_pmulhuw128",
+    "llvm.x86.sse2.pmulu.dq" => "__builtin_ia32_pmuludq128",
+    "llvm.x86.sse2.psad.bw" => "__builtin_ia32_psadbw128",
+    "llvm.x86.sse2.pshuf.d" => "__builtin_ia32_pshufd",
+    "llvm.x86.sse2.pshufh.w" => "__builtin_ia32_pshufhw",
+    "llvm.x86.sse2.pshufl.w" => "__builtin_ia32_pshuflw",
+    "llvm.x86.sse2.psll.d" => "__builtin_ia32_pslld128",
+    "llvm.x86.sse2.psll.dq" => "__builtin_ia32_pslldqi128",
+    "llvm.x86.sse2.psll.dq.bs" => "__builtin_ia32_pslldqi128_byteshift",
+    "llvm.x86.sse2.psll.q" => "__builtin_ia32_psllq128",
+    "llvm.x86.sse2.psll.w" => "__builtin_ia32_psllw128",
+    "llvm.x86.sse2.pslli.d" => "__builtin_ia32_pslldi128",
+    "llvm.x86.sse2.pslli.q" => "__builtin_ia32_psllqi128",
+    "llvm.x86.sse2.pslli.w" => "__builtin_ia32_psllwi128",
+    "llvm.x86.sse2.psra.d" => "__builtin_ia32_psrad128",
+    "llvm.x86.sse2.psra.w" => "__builtin_ia32_psraw128",
+    "llvm.x86.sse2.psrai.d" => "__builtin_ia32_psradi128",
+    "llvm.x86.sse2.psrai.w" => "__builtin_ia32_psrawi128",
+    "llvm.x86.sse2.psrl.d" => "__builtin_ia32_psrld128",
+    "llvm.x86.sse2.psrl.dq" => "__builtin_ia32_psrldqi128",
+    "llvm.x86.sse2.psrl.dq.bs" => "__builtin_ia32_psrldqi128_byteshift",
+    "llvm.x86.sse2.psrl.q" => "__builtin_ia32_psrlq128",
+    "llvm.x86.sse2.psrl.w" => "__builtin_ia32_psrlw128",
+    "llvm.x86.sse2.psrli.d" => "__builtin_ia32_psrldi128",
+    "llvm.x86.sse2.psrli.q" => "__builtin_ia32_psrlqi128",
+    "llvm.x86.sse2.psrli.w" => "__builtin_ia32_psrlwi128",
+    "llvm.x86.sse2.psubs.b" => "__builtin_ia32_psubsb128",
+    "llvm.x86.sse2.psubs.w" => "__builtin_ia32_psubsw128",
+    "llvm.x86.sse2.psubus.b" => "__builtin_ia32_psubusb128",
+    "llvm.x86.sse2.psubus.w" => "__builtin_ia32_psubusw128",
+    "llvm.x86.sse2.sqrt.pd" => "__builtin_ia32_sqrtpd",
+    "llvm.x86.sse2.sqrt.sd" => "__builtin_ia32_sqrtsd",
+    "llvm.x86.sse2.storel.dq" => "__builtin_ia32_storelv4si",
+    "llvm.x86.sse2.storeu.dq" => "__builtin_ia32_storedqu",
+    "llvm.x86.sse2.storeu.pd" => "__builtin_ia32_storeupd",
+    "llvm.x86.sse2.sub.sd" => "__builtin_ia32_subsd",
+    "llvm.x86.sse2.ucomieq.sd" => "__builtin_ia32_ucomisdeq",
+    "llvm.x86.sse2.ucomige.sd" => "__builtin_ia32_ucomisdge",
+    "llvm.x86.sse2.ucomigt.sd" => "__builtin_ia32_ucomisdgt",
+    "llvm.x86.sse2.ucomile.sd" => "__builtin_ia32_ucomisdle",
+    "llvm.x86.sse2.ucomilt.sd" => "__builtin_ia32_ucomisdlt",
+    "llvm.x86.sse2.ucomineq.sd" => "__builtin_ia32_ucomisdneq",
+    "llvm.x86.sse3.addsub.pd" => "__builtin_ia32_addsubpd",
+    "llvm.x86.sse3.addsub.ps" => "__builtin_ia32_addsubps",
+    "llvm.x86.sse3.hadd.pd" => "__builtin_ia32_haddpd",
+    "llvm.x86.sse3.hadd.ps" => "__builtin_ia32_haddps",
+    "llvm.x86.sse3.hsub.pd" => "__builtin_ia32_hsubpd",
+    "llvm.x86.sse3.hsub.ps" => "__builtin_ia32_hsubps",
+    "llvm.x86.sse3.ldu.dq" => "__builtin_ia32_lddqu",
+    "llvm.x86.sse3.monitor" => "__builtin_ia32_monitor",
+    "llvm.x86.sse3.mwait" => "__builtin_ia32_mwait",
+    "llvm.x86.sse41.blendpd" => "__builtin_ia32_blendpd",
+    "llvm.x86.sse41.blendps" => "__builtin_ia32_blendps",
+    "llvm.x86.sse41.blendvpd" => "__builtin_ia32_blendvpd",
+    "llvm.x86.sse41.blendvps" => "__builtin_ia32_blendvps",
+    "llvm.x86.sse41.dppd" => "__builtin_ia32_dppd",
+    "llvm.x86.sse41.dpps" => "__builtin_ia32_dpps",
+    "llvm.x86.sse41.extractps" => "__builtin_ia32_extractps128",
+    "llvm.x86.sse41.insertps" => "__builtin_ia32_insertps128",
+    "llvm.x86.sse41.movntdqa" => "__builtin_ia32_movntdqa",
+    "llvm.x86.sse41.mpsadbw" => "__builtin_ia32_mpsadbw128",
+    "llvm.x86.sse41.packusdw" => "__builtin_ia32_packusdw128",
+    "llvm.x86.sse41.pblendvb" => "__builtin_ia32_pblendvb128",
+    "llvm.x86.sse41.pblendw" => "__builtin_ia32_pblendw128",
+    "llvm.x86.sse41.phminposuw" => "__builtin_ia32_phminposuw128",
+    "llvm.x86.sse41.pmaxsb" => "__builtin_ia32_pmaxsb128",
+    "llvm.x86.sse41.pmaxsd" => "__builtin_ia32_pmaxsd128",
+    "llvm.x86.sse41.pmaxud" => "__builtin_ia32_pmaxud128",
+    "llvm.x86.sse41.pmaxuw" => "__builtin_ia32_pmaxuw128",
+    "llvm.x86.sse41.pminsb" => "__builtin_ia32_pminsb128",
+    "llvm.x86.sse41.pminsd" => "__builtin_ia32_pminsd128",
+    "llvm.x86.sse41.pminud" => "__builtin_ia32_pminud128",
+    "llvm.x86.sse41.pminuw" => "__builtin_ia32_pminuw128",
+    "llvm.x86.sse41.pmovsxbd" => "__builtin_ia32_pmovsxbd128",
+    "llvm.x86.sse41.pmovsxbq" => "__builtin_ia32_pmovsxbq128",
+    "llvm.x86.sse41.pmovsxbw" => "__builtin_ia32_pmovsxbw128",
+    "llvm.x86.sse41.pmovsxdq" => "__builtin_ia32_pmovsxdq128",
+    "llvm.x86.sse41.pmovsxwd" => "__builtin_ia32_pmovsxwd128",
+    "llvm.x86.sse41.pmovsxwq" => "__builtin_ia32_pmovsxwq128",
+    "llvm.x86.sse41.pmovzxbd" => "__builtin_ia32_pmovzxbd128",
+    "llvm.x86.sse41.pmovzxbq" => "__builtin_ia32_pmovzxbq128",
+    "llvm.x86.sse41.pmovzxbw" => "__builtin_ia32_pmovzxbw128",
+    "llvm.x86.sse41.pmovzxdq" => "__builtin_ia32_pmovzxdq128",
+    "llvm.x86.sse41.pmovzxwd" => "__builtin_ia32_pmovzxwd128",
+    "llvm.x86.sse41.pmovzxwq" => "__builtin_ia32_pmovzxwq128",
+    "llvm.x86.sse41.pmuldq" => "__builtin_ia32_pmuldq128",
+    "llvm.x86.sse41.ptestc" => "__builtin_ia32_ptestc128",
+    "llvm.x86.sse41.ptestnzc" => "__builtin_ia32_ptestnzc128",
+    "llvm.x86.sse41.ptestz" => "__builtin_ia32_ptestz128",
+    "llvm.x86.sse41.round.pd" => "__builtin_ia32_roundpd",
+    "llvm.x86.sse41.round.ps" => "__builtin_ia32_roundps",
+    "llvm.x86.sse41.round.sd" => "__builtin_ia32_roundsd",
+    "llvm.x86.sse41.round.ss" => "__builtin_ia32_roundss",
+    "llvm.x86.sse42.crc32.32.16" => "__builtin_ia32_crc32hi",
+    "llvm.x86.sse42.crc32.32.32" => "__builtin_ia32_crc32si",
+    "llvm.x86.sse42.crc32.32.8" => "__builtin_ia32_crc32qi",
+    "llvm.x86.sse42.crc32.64.64" => "__builtin_ia32_crc32di",
+    "llvm.x86.sse42.pcmpestri128" => "__builtin_ia32_pcmpestri128",
+    "llvm.x86.sse42.pcmpestria128" => "__builtin_ia32_pcmpestria128",
+    "llvm.x86.sse42.pcmpestric128" => "__builtin_ia32_pcmpestric128",
+    "llvm.x86.sse42.pcmpestrio128" => "__builtin_ia32_pcmpestrio128",
+    "llvm.x86.sse42.pcmpestris128" => "__builtin_ia32_pcmpestris128",
+    "llvm.x86.sse42.pcmpestriz128" => "__builtin_ia32_pcmpestriz128",
+    "llvm.x86.sse42.pcmpestrm128" => "__builtin_ia32_pcmpestrm128",
+    "llvm.x86.sse42.pcmpistri128" => "__builtin_ia32_pcmpistri128",
+    "llvm.x86.sse42.pcmpistria128" => "__builtin_ia32_pcmpistria128",
+    "llvm.x86.sse42.pcmpistric128" => "__builtin_ia32_pcmpistric128",
+    "llvm.x86.sse42.pcmpistrio128" => "__builtin_ia32_pcmpistrio128",
+    "llvm.x86.sse42.pcmpistris128" => "__builtin_ia32_pcmpistris128",
+    "llvm.x86.sse42.pcmpistriz128" => "__builtin_ia32_pcmpistriz128",
+    "llvm.x86.sse42.pcmpistrm128" => "__builtin_ia32_pcmpistrm128",
+    "llvm.x86.sse4a.extrq" => "__builtin_ia32_extrq",
+    "llvm.x86.sse4a.extrqi" => "__builtin_ia32_extrqi",
+    "llvm.x86.sse4a.insertq" => "__builtin_ia32_insertq",
+    "llvm.x86.sse4a.insertqi" => "__builtin_ia32_insertqi",
+    "llvm.x86.sse4a.movnt.sd" => "__builtin_ia32_movntsd",
+    "llvm.x86.sse4a.movnt.ss" => "__builtin_ia32_movntss",
+    "llvm.x86.ssse3.pabs.b.128" => "__builtin_ia32_pabsb128",
+    "llvm.x86.ssse3.pabs.d.128" => "__builtin_ia32_pabsd128",
+    "llvm.x86.ssse3.pabs.w.128" => "__builtin_ia32_pabsw128",
+    "llvm.x86.ssse3.phadd.d.128" => "__builtin_ia32_phaddd128",
+    "llvm.x86.ssse3.phadd.sw.128" => "__builtin_ia32_phaddsw128",
+    "llvm.x86.ssse3.phadd.w.128" => "__builtin_ia32_phaddw128",
+    "llvm.x86.ssse3.phsub.d.128" => "__builtin_ia32_phsubd128",
+    "llvm.x86.ssse3.phsub.sw.128" => "__builtin_ia32_phsubsw128",
+    "llvm.x86.ssse3.phsub.w.128" => "__builtin_ia32_phsubw128",
+    "llvm.x86.ssse3.pmadd.ub.sw.128" => "__builtin_ia32_pmaddubsw128",
+    "llvm.x86.ssse3.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128",
+    "llvm.x86.ssse3.pshuf.b.128" => "__builtin_ia32_pshufb128",
+    "llvm.x86.ssse3.psign.b.128" => "__builtin_ia32_psignb128",
+    "llvm.x86.ssse3.psign.d.128" => "__builtin_ia32_psignd128",
+    "llvm.x86.ssse3.psign.w.128" => "__builtin_ia32_psignw128",
+    "llvm.x86.sttilecfg" => "__builtin_ia32_tile_storeconfig",
+    "llvm.x86.stui" => "__builtin_ia32_stui",
+    "llvm.x86.subborrow.u32" => "__builtin_ia32_subborrow_u32",
+    "llvm.x86.subborrow.u64" => "__builtin_ia32_subborrow_u64",
+    "llvm.x86.tbm.bextri.u32" => "__builtin_ia32_bextri_u32",
+    "llvm.x86.tbm.bextri.u64" => "__builtin_ia32_bextri_u64",
+    "llvm.x86.tcmmimfp16ps" => "__builtin_ia32_tcmmimfp16ps",
+    "llvm.x86.tcmmimfp16ps.internal" => "__builtin_ia32_tcmmimfp16ps_internal",
+    "llvm.x86.tcmmrlfp16ps" => "__builtin_ia32_tcmmrlfp16ps",
+    "llvm.x86.tcmmrlfp16ps.internal" => "__builtin_ia32_tcmmrlfp16ps_internal",
+    "llvm.x86.tdpbf16ps" => "__builtin_ia32_tdpbf16ps",
+    "llvm.x86.tdpbf16ps.internal" => "__builtin_ia32_tdpbf16ps_internal",
+    "llvm.x86.tdpbssd" => "__builtin_ia32_tdpbssd",
+    "llvm.x86.tdpbssd.internal" => "__builtin_ia32_tdpbssd_internal",
+    "llvm.x86.tdpbsud" => "__builtin_ia32_tdpbsud",
+    "llvm.x86.tdpbsud.internal" => "__builtin_ia32_tdpbsud_internal",
+    "llvm.x86.tdpbusd" => "__builtin_ia32_tdpbusd",
+    "llvm.x86.tdpbusd.internal" => "__builtin_ia32_tdpbusd_internal",
+    "llvm.x86.tdpbuud" => "__builtin_ia32_tdpbuud",
+    "llvm.x86.tdpbuud.internal" => "__builtin_ia32_tdpbuud_internal",
+    "llvm.x86.tdpfp16ps" => "__builtin_ia32_tdpfp16ps",
+    "llvm.x86.tdpfp16ps.internal" => "__builtin_ia32_tdpfp16ps_internal",
+    "llvm.x86.testui" => "__builtin_ia32_testui",
+    "llvm.x86.tileloadd64" => "__builtin_ia32_tileloadd64",
+    "llvm.x86.tileloadd64.internal" => "__builtin_ia32_tileloadd64_internal",
+    "llvm.x86.tileloaddt164" => "__builtin_ia32_tileloaddt164",
+    "llvm.x86.tileloaddt164.internal" => "__builtin_ia32_tileloaddt164_internal",
+    "llvm.x86.tilerelease" => "__builtin_ia32_tilerelease",
+    "llvm.x86.tilestored64" => "__builtin_ia32_tilestored64",
+    "llvm.x86.tilestored64.internal" => "__builtin_ia32_tilestored64_internal",
+    "llvm.x86.tilezero" => "__builtin_ia32_tilezero",
+    "llvm.x86.tilezero.internal" => "__builtin_ia32_tilezero_internal",
+    "llvm.x86.tpause" => "__builtin_ia32_tpause",
+    "llvm.x86.umonitor" => "__builtin_ia32_umonitor",
+    "llvm.x86.umwait" => "__builtin_ia32_umwait",
+    "llvm.x86.urdmsr" => "__builtin_ia32_urdmsr",
+    "llvm.x86.uwrmsr" => "__builtin_ia32_uwrmsr",
+    "llvm.x86.vbcstnebf162ps128" => "__builtin_ia32_vbcstnebf162ps128",
+    "llvm.x86.vbcstnebf162ps256" => "__builtin_ia32_vbcstnebf162ps256",
+    "llvm.x86.vbcstnesh2ps128" => "__builtin_ia32_vbcstnesh2ps128",
+    "llvm.x86.vbcstnesh2ps256" => "__builtin_ia32_vbcstnesh2ps256",
+    "llvm.x86.vcvtneebf162ps128" => "__builtin_ia32_vcvtneebf162ps128",
+    "llvm.x86.vcvtneebf162ps256" => "__builtin_ia32_vcvtneebf162ps256",
+    "llvm.x86.vcvtneeph2ps128" => "__builtin_ia32_vcvtneeph2ps128",
+    "llvm.x86.vcvtneeph2ps256" => "__builtin_ia32_vcvtneeph2ps256",
+    "llvm.x86.vcvtneobf162ps128" => "__builtin_ia32_vcvtneobf162ps128",
+    "llvm.x86.vcvtneobf162ps256" => "__builtin_ia32_vcvtneobf162ps256",
+    "llvm.x86.vcvtneoph2ps128" => "__builtin_ia32_vcvtneoph2ps128",
+    "llvm.x86.vcvtneoph2ps256" => "__builtin_ia32_vcvtneoph2ps256",
+    "llvm.x86.vcvtneps2bf16128" => "__builtin_ia32_vcvtneps2bf16128",
+    "llvm.x86.vcvtneps2bf16256" => "__builtin_ia32_vcvtneps2bf16256",
+    "llvm.x86.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps",
+    "llvm.x86.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256",
+    "llvm.x86.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph",
+    "llvm.x86.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256",
+    "llvm.x86.vgf2p8affineinvqb.128" => "__builtin_ia32_vgf2p8affineinvqb_v16qi",
+    "llvm.x86.vgf2p8affineinvqb.256" => "__builtin_ia32_vgf2p8affineinvqb_v32qi",
+    "llvm.x86.vgf2p8affineinvqb.512" => "__builtin_ia32_vgf2p8affineinvqb_v64qi",
+    "llvm.x86.vgf2p8affineqb.128" => "__builtin_ia32_vgf2p8affineqb_v16qi",
+    "llvm.x86.vgf2p8affineqb.256" => "__builtin_ia32_vgf2p8affineqb_v32qi",
+    "llvm.x86.vgf2p8affineqb.512" => "__builtin_ia32_vgf2p8affineqb_v64qi",
+    "llvm.x86.vgf2p8mulb.128" => "__builtin_ia32_vgf2p8mulb_v16qi",
+    "llvm.x86.vgf2p8mulb.256" => "__builtin_ia32_vgf2p8mulb_v32qi",
+    "llvm.x86.vgf2p8mulb.512" => "__builtin_ia32_vgf2p8mulb_v64qi",
+    "llvm.x86.vsha512msg1" => "__builtin_ia32_vsha512msg1",
+    "llvm.x86.vsha512msg2" => "__builtin_ia32_vsha512msg2",
+    "llvm.x86.vsha512rnds2" => "__builtin_ia32_vsha512rnds2",
+    "llvm.x86.vsm3msg1" => "__builtin_ia32_vsm3msg1",
+    "llvm.x86.vsm3msg2" => "__builtin_ia32_vsm3msg2",
+    "llvm.x86.vsm3rnds2" => "__builtin_ia32_vsm3rnds2",
+    "llvm.x86.vsm4key4128" => "__builtin_ia32_vsm4key4128",
+    "llvm.x86.vsm4key4256" => "__builtin_ia32_vsm4key4256",
+    "llvm.x86.vsm4rnds4128" => "__builtin_ia32_vsm4rnds4128",
+    "llvm.x86.vsm4rnds4256" => "__builtin_ia32_vsm4rnds4256",
+    "llvm.x86.wbinvd" => "__builtin_ia32_wbinvd",
+    "llvm.x86.wbnoinvd" => "__builtin_ia32_wbnoinvd",
+    "llvm.x86.wrfsbase.32" => "__builtin_ia32_wrfsbase32",
+    "llvm.x86.wrfsbase.64" => "__builtin_ia32_wrfsbase64",
+    "llvm.x86.wrgsbase.32" => "__builtin_ia32_wrgsbase32",
+    "llvm.x86.wrgsbase.64" => "__builtin_ia32_wrgsbase64",
+    "llvm.x86.wrpkru" => "__builtin_ia32_wrpkru",
+    "llvm.x86.wrssd" => "__builtin_ia32_wrssd",
+    "llvm.x86.wrssq" => "__builtin_ia32_wrssq",
+    "llvm.x86.wrussd" => "__builtin_ia32_wrussd",
+    "llvm.x86.wrussq" => "__builtin_ia32_wrussq",
+    "llvm.x86.xabort" => "__builtin_ia32_xabort",
+    "llvm.x86.xbegin" => "__builtin_ia32_xbegin",
+    "llvm.x86.xend" => "__builtin_ia32_xend",
+    "llvm.x86.xop.vfrcz.pd" => "__builtin_ia32_vfrczpd",
+    "llvm.x86.xop.vfrcz.pd.256" => "__builtin_ia32_vfrczpd256",
+    "llvm.x86.xop.vfrcz.ps" => "__builtin_ia32_vfrczps",
+    "llvm.x86.xop.vfrcz.ps.256" => "__builtin_ia32_vfrczps256",
+    "llvm.x86.xop.vfrcz.sd" => "__builtin_ia32_vfrczsd",
+    "llvm.x86.xop.vfrcz.ss" => "__builtin_ia32_vfrczss",
+    "llvm.x86.xop.vpcmov" => "__builtin_ia32_vpcmov",
+    "llvm.x86.xop.vpcmov.256" => "__builtin_ia32_vpcmov_256",
+    "llvm.x86.xop.vpcomb" => "__builtin_ia32_vpcomb",
+    "llvm.x86.xop.vpcomd" => "__builtin_ia32_vpcomd",
+    "llvm.x86.xop.vpcomq" => "__builtin_ia32_vpcomq",
+    "llvm.x86.xop.vpcomub" => "__builtin_ia32_vpcomub",
+    "llvm.x86.xop.vpcomud" => "__builtin_ia32_vpcomud",
+    "llvm.x86.xop.vpcomuq" => "__builtin_ia32_vpcomuq",
+    "llvm.x86.xop.vpcomuw" => "__builtin_ia32_vpcomuw",
+    "llvm.x86.xop.vpcomw" => "__builtin_ia32_vpcomw",
+    "llvm.x86.xop.vpermil2pd" => "__builtin_ia32_vpermil2pd",
+    "llvm.x86.xop.vpermil2pd.256" => "__builtin_ia32_vpermil2pd256",
+    "llvm.x86.xop.vpermil2ps" => "__builtin_ia32_vpermil2ps",
+    "llvm.x86.xop.vpermil2ps.256" => "__builtin_ia32_vpermil2ps256",
+    "llvm.x86.xop.vphaddbd" => "__builtin_ia32_vphaddbd",
+    "llvm.x86.xop.vphaddbq" => "__builtin_ia32_vphaddbq",
+    "llvm.x86.xop.vphaddbw" => "__builtin_ia32_vphaddbw",
+    "llvm.x86.xop.vphadddq" => "__builtin_ia32_vphadddq",
+    "llvm.x86.xop.vphaddubd" => "__builtin_ia32_vphaddubd",
+    "llvm.x86.xop.vphaddubq" => "__builtin_ia32_vphaddubq",
+    "llvm.x86.xop.vphaddubw" => "__builtin_ia32_vphaddubw",
+    "llvm.x86.xop.vphaddudq" => "__builtin_ia32_vphaddudq",
+    "llvm.x86.xop.vphadduwd" => "__builtin_ia32_vphadduwd",
+    "llvm.x86.xop.vphadduwq" => "__builtin_ia32_vphadduwq",
+    "llvm.x86.xop.vphaddwd" => "__builtin_ia32_vphaddwd",
+    "llvm.x86.xop.vphaddwq" => "__builtin_ia32_vphaddwq",
+    "llvm.x86.xop.vphsubbw" => "__builtin_ia32_vphsubbw",
+    "llvm.x86.xop.vphsubdq" => "__builtin_ia32_vphsubdq",
+    "llvm.x86.xop.vphsubwd" => "__builtin_ia32_vphsubwd",
+    "llvm.x86.xop.vpmacsdd" => "__builtin_ia32_vpmacsdd",
+    "llvm.x86.xop.vpmacsdqh" => "__builtin_ia32_vpmacsdqh",
+    "llvm.x86.xop.vpmacsdql" => "__builtin_ia32_vpmacsdql",
+    "llvm.x86.xop.vpmacssdd" => "__builtin_ia32_vpmacssdd",
+    "llvm.x86.xop.vpmacssdqh" => "__builtin_ia32_vpmacssdqh",
+    "llvm.x86.xop.vpmacssdql" => "__builtin_ia32_vpmacssdql",
+    "llvm.x86.xop.vpmacsswd" => "__builtin_ia32_vpmacsswd",
+    "llvm.x86.xop.vpmacssww" => "__builtin_ia32_vpmacssww",
+    "llvm.x86.xop.vpmacswd" => "__builtin_ia32_vpmacswd",
+    "llvm.x86.xop.vpmacsww" => "__builtin_ia32_vpmacsww",
+    "llvm.x86.xop.vpmadcsswd" => "__builtin_ia32_vpmadcsswd",
+    "llvm.x86.xop.vpmadcswd" => "__builtin_ia32_vpmadcswd",
+    "llvm.x86.xop.vpperm" => "__builtin_ia32_vpperm",
+    "llvm.x86.xop.vprotb" => "__builtin_ia32_vprotb",
+    "llvm.x86.xop.vprotbi" => "__builtin_ia32_vprotbi",
+    "llvm.x86.xop.vprotd" => "__builtin_ia32_vprotd",
+    "llvm.x86.xop.vprotdi" => "__builtin_ia32_vprotdi",
+    "llvm.x86.xop.vprotq" => "__builtin_ia32_vprotq",
+    "llvm.x86.xop.vprotqi" => "__builtin_ia32_vprotqi",
+    "llvm.x86.xop.vprotw" => "__builtin_ia32_vprotw",
+    "llvm.x86.xop.vprotwi" => "__builtin_ia32_vprotwi",
+    "llvm.x86.xop.vpshab" => "__builtin_ia32_vpshab",
+    "llvm.x86.xop.vpshad" => "__builtin_ia32_vpshad",
+    "llvm.x86.xop.vpshaq" => "__builtin_ia32_vpshaq",
+    "llvm.x86.xop.vpshaw" => "__builtin_ia32_vpshaw",
+    "llvm.x86.xop.vpshlb" => "__builtin_ia32_vpshlb",
+    "llvm.x86.xop.vpshld" => "__builtin_ia32_vpshld",
+    "llvm.x86.xop.vpshlq" => "__builtin_ia32_vpshlq",
+    "llvm.x86.xop.vpshlw" => "__builtin_ia32_vpshlw",
+    "llvm.x86.xresldtrk" => "__builtin_ia32_xresldtrk",
+    "llvm.x86.xsusldtrk" => "__builtin_ia32_xsusldtrk",
+    "llvm.x86.xtest" => "__builtin_ia32_xtest",
+    // xcore
+    "llvm.xcore.bitrev" => "__builtin_bitrev",
+    "llvm.xcore.getid" => "__builtin_getid",
+    "llvm.xcore.getps" => "__builtin_getps",
+    "llvm.xcore.setps" => "__builtin_setps",
+    _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
new file mode 100644
index 00000000000..0eebd21001a
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -0,0 +1,1557 @@
+use std::borrow::Cow;
+
+use gccjit::{CType, Context, Field, Function, FunctionPtrType, RValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::BuilderMethods;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
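+// The helpers below build packed struct types for the AES Key Locker intrinsics.
+// Their layout is meant to mirror what the corresponding LLVM intrinsics return:
+// a small status field followed by a run of __m128i fields (modeled here as
+// 2 x i64 vectors), matching the out-buffers of the GCC builtins used later in
+// this file (e.g. 6 x __m128i for __builtin_ia32_encodekey128_u32).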
+fn encode_key_128_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u32_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let field3 = builder.context.new_field(None, m128i, "field3");
+    let field4 = builder.context.new_field(None, m128i, "field4");
+    let field5 = builder.context.new_field(None, m128i, "field5");
+    let field6 = builder.context.new_field(None, m128i, "field6");
+    let field7 = builder.context.new_field(None, m128i, "field7");
+    let encode_type = builder.context.new_struct_type(
+        None,
+        "EncodeKey128Output",
+        &[field1, field2, field3, field4, field5, field6, field7],
+    );
+    #[cfg(feature = "master")]
+    encode_type.as_type().set_packed();
+    (encode_type.as_type(), field1, field2)
+}
+
+fn encode_key_256_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u32_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let field3 = builder.context.new_field(None, m128i, "field3");
+    let field4 = builder.context.new_field(None, m128i, "field4");
+    let field5 = builder.context.new_field(None, m128i, "field5");
+    let field6 = builder.context.new_field(None, m128i, "field6");
+    let field7 = builder.context.new_field(None, m128i, "field7");
+    let field8 = builder.context.new_field(None, m128i, "field8");
+    let encode_type = builder.context.new_struct_type(
+        None,
+        "EncodeKey256Output",
+        &[field1, field2, field3, field4, field5, field6, field7, field8],
+    );
+    #[cfg(feature = "master")]
+    encode_type.as_type().set_packed();
+    (encode_type.as_type(), field1, field2)
+}
+
+fn aes_output_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u8_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let aes_output_type = builder.context.new_struct_type(None, "AesOutput", &[field1, field2]);
+    let typ = aes_output_type.as_type();
+    #[cfg(feature = "master")]
+    typ.set_packed();
+    (typ, field1, field2)
+}
+
+fn wide_aes_output_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u8_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let field3 = builder.context.new_field(None, m128i, "field3");
+    let field4 = builder.context.new_field(None, m128i, "field4");
+    let field5 = builder.context.new_field(None, m128i, "field5");
+    let field6 = builder.context.new_field(None, m128i, "field6");
+    let field7 = builder.context.new_field(None, m128i, "field7");
+    let field8 = builder.context.new_field(None, m128i, "field8");
+    let field9 = builder.context.new_field(None, m128i, "field9");
+    let aes_output_type = builder.context.new_struct_type(
+        None,
+        "WideAesOutput",
+        &[field1, field2, field3, field4, field5, field6, field7, field8, field9],
+    );
+    #[cfg(feature = "master")]
+    aes_output_type.as_type().set_packed();
+    (aes_output_type.as_type(), field1, field2)
+}
+
+#[cfg_attr(not(feature = "master"), allow(unused_variables))]
+pub fn adjust_function<'gcc>(
+    context: &'gcc Context<'gcc>,
+    func_name: &str,
+    func_ptr: RValue<'gcc>,
+    args: &[RValue<'gcc>],
+) -> RValue<'gcc> {
+    // FIXME: we should not need this hack: this is required because both _mm_fcmadd_sch
+    // and _mm_mask3_fcmadd_round_sch call llvm.x86.avx512fp16.mask.vfcmadd.csh and we
+    // seem to need to map this one LLVM intrinsic to 2 different GCC builtins.
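+    // The check below inspects the mask argument: a debug representation ending
+    // in 255 (an all-ones 8-bit mask) is taken to mean that the `_mask_round`
+    // variant of the builtin is wanted instead of the `_mask3_round` one.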
+    #[cfg(feature = "master")]
+    match func_name {
+        "__builtin_ia32_vfcmaddcsh_mask3_round" => {
+            if format!("{:?}", args[3]).ends_with("255") {
+                return context
+                    .get_target_builtin_function("__builtin_ia32_vfcmaddcsh_mask_round")
+                    .get_address(None);
+            }
+        }
+        "__builtin_ia32_vfmaddcsh_mask3_round" => {
+            if format!("{:?}", args[3]).ends_with("255") {
+                return context
+                    .get_target_builtin_function("__builtin_ia32_vfmaddcsh_mask_round")
+                    .get_address(None);
+            }
+        }
+        _ => (),
+    }
+
+    func_ptr
+}
+
+pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+    gcc_func: FunctionPtrType<'gcc>,
+    mut args: Cow<'b, [RValue<'gcc>]>,
+    func_name: &str,
+) -> Cow<'b, [RValue<'gcc>]> {
+    // TODO: this might not be a good way to work around the missing tile builtins.
+    if func_name == "__builtin_trap" {
+        return vec![].into();
+    }
+
+    // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
+    // arguments here.
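+    // A typical case: the GCC `_mask` builtins (e.g. __builtin_ia32_pmuldq512_mask)
+    // take a merge source and a mask in addition to the operands of the unmasked
+    // LLVM intrinsic, so the arms below append a don't-care source and an all-ones
+    // (-1) mask to request the unmasked behavior.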
+    if gcc_func.get_param_count() != args.len() {
+        match func_name {
+            // NOTE: the following intrinsics have a different number of parameters in LLVM and GCC.
+            "__builtin_ia32_prold512_mask"
+            | "__builtin_ia32_pmuldq512_mask"
+            | "__builtin_ia32_pmuludq512_mask"
+            | "__builtin_ia32_pmaxsd512_mask"
+            | "__builtin_ia32_pmaxsq512_mask"
+            | "__builtin_ia32_pmaxsq256_mask"
+            | "__builtin_ia32_pmaxsq128_mask"
+            | "__builtin_ia32_pmaxud512_mask"
+            | "__builtin_ia32_pmaxuq512_mask"
+            | "__builtin_ia32_pminsd512_mask"
+            | "__builtin_ia32_pminsq512_mask"
+            | "__builtin_ia32_pminsq256_mask"
+            | "__builtin_ia32_pminsq128_mask"
+            | "__builtin_ia32_pminud512_mask"
+            | "__builtin_ia32_pminuq512_mask"
+            | "__builtin_ia32_prolq512_mask"
+            | "__builtin_ia32_prorq512_mask"
+            | "__builtin_ia32_pslldi512_mask"
+            | "__builtin_ia32_psrldi512_mask"
+            | "__builtin_ia32_psllqi512_mask"
+            | "__builtin_ia32_psrlqi512_mask"
+            | "__builtin_ia32_pslld512_mask"
+            | "__builtin_ia32_psrld512_mask"
+            | "__builtin_ia32_psllq512_mask"
+            | "__builtin_ia32_psrlq512_mask"
+            | "__builtin_ia32_psrad512_mask"
+            | "__builtin_ia32_psraq512_mask"
+            | "__builtin_ia32_psradi512_mask"
+            | "__builtin_ia32_psraqi512_mask"
+            | "__builtin_ia32_psrav16si_mask"
+            | "__builtin_ia32_psrav8di_mask"
+            | "__builtin_ia32_prolvd512_mask"
+            | "__builtin_ia32_prorvd512_mask"
+            | "__builtin_ia32_prolvq512_mask"
+            | "__builtin_ia32_prorvq512_mask"
+            | "__builtin_ia32_psllv16si_mask"
+            | "__builtin_ia32_psrlv16si_mask"
+            | "__builtin_ia32_psllv8di_mask"
+            | "__builtin_ia32_psrlv8di_mask"
+            | "__builtin_ia32_permvarsi512_mask"
+            | "__builtin_ia32_vpermilvarps512_mask"
+            | "__builtin_ia32_vpermilvarpd512_mask"
+            | "__builtin_ia32_permvardi512_mask"
+            | "__builtin_ia32_permvarsf512_mask"
+            | "__builtin_ia32_permvarqi512_mask"
+            | "__builtin_ia32_permvarqi256_mask"
+            | "__builtin_ia32_permvarqi128_mask"
+            | "__builtin_ia32_vpmultishiftqb512_mask"
+            | "__builtin_ia32_vpmultishiftqb256_mask"
+            | "__builtin_ia32_vpmultishiftqb128_mask" => {
+                let mut new_args = args.to_vec();
+                let arg3_type = gcc_func.get_param_type(2);
+                let first_arg = builder
+                    .current_func()
+                    .new_local(None, arg3_type, "undefined_for_intrinsic")
+                    .to_rvalue();
+                new_args.push(first_arg);
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_pmaxuq256_mask"
+            | "__builtin_ia32_pmaxuq128_mask"
+            | "__builtin_ia32_pminuq256_mask"
+            | "__builtin_ia32_pminuq128_mask"
+            | "__builtin_ia32_prold256_mask"
+            | "__builtin_ia32_prold128_mask"
+            | "__builtin_ia32_prord512_mask"
+            | "__builtin_ia32_prord256_mask"
+            | "__builtin_ia32_prord128_mask"
+            | "__builtin_ia32_prolq256_mask"
+            | "__builtin_ia32_prolq128_mask"
+            | "__builtin_ia32_prorq256_mask"
+            | "__builtin_ia32_prorq128_mask"
+            | "__builtin_ia32_psraq256_mask"
+            | "__builtin_ia32_psraq128_mask"
+            | "__builtin_ia32_psraqi256_mask"
+            | "__builtin_ia32_psraqi128_mask"
+            | "__builtin_ia32_psravq256_mask"
+            | "__builtin_ia32_psravq128_mask"
+            | "__builtin_ia32_prolvd256_mask"
+            | "__builtin_ia32_prolvd128_mask"
+            | "__builtin_ia32_prorvd256_mask"
+            | "__builtin_ia32_prorvd128_mask"
+            | "__builtin_ia32_prolvq256_mask"
+            | "__builtin_ia32_prolvq128_mask"
+            | "__builtin_ia32_prorvq256_mask"
+            | "__builtin_ia32_prorvq128_mask"
+            | "__builtin_ia32_permvardi256_mask"
+            | "__builtin_ia32_permvardf512_mask"
+            | "__builtin_ia32_permvardf256_mask"
+            | "__builtin_ia32_pmulhuw512_mask"
+            | "__builtin_ia32_pmulhw512_mask"
+            | "__builtin_ia32_pmulhrsw512_mask"
+            | "__builtin_ia32_pmaxuw512_mask"
+            | "__builtin_ia32_pmaxub512_mask"
+            | "__builtin_ia32_pmaxsw512_mask"
+            | "__builtin_ia32_pmaxsb512_mask"
+            | "__builtin_ia32_pminuw512_mask"
+            | "__builtin_ia32_pminub512_mask"
+            | "__builtin_ia32_pminsw512_mask"
+            | "__builtin_ia32_pminsb512_mask"
+            | "__builtin_ia32_pmaddwd512_mask"
+            | "__builtin_ia32_pmaddubsw512_mask"
+            | "__builtin_ia32_packssdw512_mask"
+            | "__builtin_ia32_packsswb512_mask"
+            | "__builtin_ia32_packusdw512_mask"
+            | "__builtin_ia32_packuswb512_mask"
+            | "__builtin_ia32_pavgw512_mask"
+            | "__builtin_ia32_pavgb512_mask"
+            | "__builtin_ia32_psllw512_mask"
+            | "__builtin_ia32_psllwi512_mask"
+            | "__builtin_ia32_psllv32hi_mask"
+            | "__builtin_ia32_psrlw512_mask"
+            | "__builtin_ia32_psrlwi512_mask"
+            | "__builtin_ia32_psllv16hi_mask"
+            | "__builtin_ia32_psllv8hi_mask"
+            | "__builtin_ia32_psrlv32hi_mask"
+            | "__builtin_ia32_psraw512_mask"
+            | "__builtin_ia32_psrawi512_mask"
+            | "__builtin_ia32_psrlv16hi_mask"
+            | "__builtin_ia32_psrlv8hi_mask"
+            | "__builtin_ia32_psrav32hi_mask"
+            | "__builtin_ia32_permvarhi512_mask"
+            | "__builtin_ia32_pshufb512_mask"
+            | "__builtin_ia32_psrav16hi_mask"
+            | "__builtin_ia32_psrav8hi_mask"
+            | "__builtin_ia32_permvarhi256_mask"
+            | "__builtin_ia32_permvarhi128_mask"
+            | "__builtin_ia32_maxph128_mask"
+            | "__builtin_ia32_maxph256_mask"
+            | "__builtin_ia32_minph128_mask"
+            | "__builtin_ia32_minph256_mask" => {
+                let mut new_args = args.to_vec();
+                let arg3_type = gcc_func.get_param_type(2);
+                let vector_type = arg3_type.dyncast_vector().expect("vector type");
+                let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+                let num_units = vector_type.get_num_units();
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]);
+                new_args.push(first_arg);
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_dbpsadbw512_mask"
+            | "__builtin_ia32_dbpsadbw256_mask"
+            | "__builtin_ia32_dbpsadbw128_mask" => {
+                let mut new_args = args.to_vec();
+                let arg4_type = gcc_func.get_param_type(3);
+                let vector_type = arg4_type.dyncast_vector().expect("vector type");
+                let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+                let num_units = vector_type.get_num_units();
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]);
+                new_args.push(first_arg);
+                let arg5_type = gcc_func.get_param_type(4);
+                let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_vplzcntd_512_mask"
+            | "__builtin_ia32_vplzcntd_256_mask"
+            | "__builtin_ia32_vplzcntd_128_mask"
+            | "__builtin_ia32_vplzcntq_512_mask"
+            | "__builtin_ia32_vplzcntq_256_mask"
+            | "__builtin_ia32_vplzcntq_128_mask"
+            | "__builtin_ia32_cvtqq2pd128_mask"
+            | "__builtin_ia32_cvtqq2pd256_mask"
+            | "__builtin_ia32_cvtqq2ps256_mask"
+            | "__builtin_ia32_cvtuqq2pd128_mask"
+            | "__builtin_ia32_cvtuqq2pd256_mask"
+            | "__builtin_ia32_cvtuqq2ps256_mask"
+            | "__builtin_ia32_vcvtw2ph128_mask"
+            | "__builtin_ia32_vcvtw2ph256_mask"
+            | "__builtin_ia32_vcvtuw2ph128_mask"
+            | "__builtin_ia32_vcvtuw2ph256_mask"
+            | "__builtin_ia32_vcvtdq2ph256_mask"
+            | "__builtin_ia32_vcvtudq2ph256_mask" => {
+                let mut new_args = args.to_vec();
+                // Remove the last arg, as it doesn't seem to be used in GCC and is always false.
+                new_args.pop();
+                let arg2_type = gcc_func.get_param_type(1);
+                let vector_type = arg2_type.dyncast_vector().expect("vector type");
+                let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+                let num_units = vector_type.get_num_units();
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
+                new_args.push(first_arg);
+                let arg3_type = gcc_func.get_param_type(2);
+                let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_vpconflictsi_512_mask"
+            | "__builtin_ia32_vpconflictsi_256_mask"
+            | "__builtin_ia32_vpconflictsi_128_mask"
+            | "__builtin_ia32_vpconflictdi_512_mask"
+            | "__builtin_ia32_vpconflictdi_256_mask"
+            | "__builtin_ia32_vpconflictdi_128_mask" => {
+                let mut new_args = args.to_vec();
+                let arg2_type = gcc_func.get_param_type(1);
+                let vector_type = arg2_type.dyncast_vector().expect("vector type");
+                let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+                let num_units = vector_type.get_num_units();
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
+                new_args.push(first_arg);
+                let arg3_type = gcc_func.get_param_type(2);
+                let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_pternlogd512_mask"
+            | "__builtin_ia32_pternlogd256_mask"
+            | "__builtin_ia32_pternlogd128_mask"
+            | "__builtin_ia32_pternlogq512_mask"
+            | "__builtin_ia32_pternlogq256_mask"
+            | "__builtin_ia32_pternlogq128_mask" => {
+                let mut new_args = args.to_vec();
+                let arg5_type = gcc_func.get_param_type(4);
+                let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
+                let mut new_args = args.to_vec();
+
+                let mut last_arg = None;
+                if args.len() == 4 {
+                    last_arg = new_args.pop();
+                }
+
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+
+                if args.len() == 3 {
+                    // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 map to
+                    // the same GCC builtin, but the former has 3 parameters and the
+                    // latter has 4, so only the former needs this additional argument.
+                    let arg5_type = gcc_func.get_param_type(4);
+                    new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4));
+                }
+
+                if let Some(last_arg) = last_arg {
+                    new_args.push(last_arg);
+                }
+
+                args = new_args.into();
+            }
+            "__builtin_ia32_addps512_mask"
+            | "__builtin_ia32_addpd512_mask"
+            | "__builtin_ia32_subps512_mask"
+            | "__builtin_ia32_subpd512_mask"
+            | "__builtin_ia32_mulps512_mask"
+            | "__builtin_ia32_mulpd512_mask"
+            | "__builtin_ia32_divps512_mask"
+            | "__builtin_ia32_divpd512_mask"
+            | "__builtin_ia32_maxps512_mask"
+            | "__builtin_ia32_maxpd512_mask"
+            | "__builtin_ia32_minps512_mask"
+            | "__builtin_ia32_minpd512_mask" => {
+                let mut new_args = args.to_vec();
+                let last_arg = new_args.pop().expect("last arg");
+                let arg3_type = gcc_func.get_param_type(2);
+                let undefined = builder
+                    .current_func()
+                    .new_local(None, arg3_type, "undefined_for_intrinsic")
+                    .to_rvalue();
+                new_args.push(undefined);
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+                new_args.push(last_arg);
+                args = new_args.into();
+            }
+            "__builtin_ia32_vfmaddsubps512_mask"
+            | "__builtin_ia32_vfmaddsubpd512_mask"
+            | "__builtin_ia32_cmpsh_mask_round"
+            | "__builtin_ia32_vfmaddph512_mask"
+            | "__builtin_ia32_vfmaddsubph512_mask" => {
+                let mut new_args = args.to_vec();
+                let last_arg = new_args.pop().expect("last arg");
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+                new_args.push(last_arg);
+                args = new_args.into();
+            }
+            "__builtin_ia32_vpermi2vard512_mask"
+            | "__builtin_ia32_vpermi2vard256_mask"
+            | "__builtin_ia32_vpermi2vard128_mask"
+            | "__builtin_ia32_vpermi2varq512_mask"
+            | "__builtin_ia32_vpermi2varq256_mask"
+            | "__builtin_ia32_vpermi2varq128_mask"
+            | "__builtin_ia32_vpermi2varps512_mask"
+            | "__builtin_ia32_vpermi2varps256_mask"
+            | "__builtin_ia32_vpermi2varps128_mask"
+            | "__builtin_ia32_vpermi2varpd512_mask"
+            | "__builtin_ia32_vpermi2varpd256_mask"
+            | "__builtin_ia32_vpermi2varpd128_mask"
+            | "__builtin_ia32_vpmadd52huq512_mask"
+            | "__builtin_ia32_vpmadd52luq512_mask"
+            | "__builtin_ia32_vfmaddsubph128_mask"
+            | "__builtin_ia32_vfmaddsubph256_mask" => {
+                let mut new_args = args.to_vec();
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+                args = new_args.into();
+            }
+            "__builtin_ia32_cvtdq2ps512_mask"
+            | "__builtin_ia32_cvtudq2ps512_mask"
+            | "__builtin_ia32_sqrtps512_mask"
+            | "__builtin_ia32_sqrtpd512_mask" => {
+                let mut new_args = args.to_vec();
+                let last_arg = new_args.pop().expect("last arg");
+                let arg2_type = gcc_func.get_param_type(1);
+                let undefined = builder
+                    .current_func()
+                    .new_local(None, arg2_type, "undefined_for_intrinsic")
+                    .to_rvalue();
+                new_args.push(undefined);
+                let arg3_type = gcc_func.get_param_type(2);
+                let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
+                new_args.push(minus_one);
+                new_args.push(last_arg);
+                args = new_args.into();
+            }
+            "__builtin_ia32_stmxcsr" => {
+                args = vec![].into();
+            }
+            "__builtin_ia32_addcarryx_u64"
+            | "__builtin_ia32_sbb_u64"
+            | "__builtin_ia32_addcarryx_u32"
+            | "__builtin_ia32_sbb_u32" => {
+                let mut new_args = args.to_vec();
+                let arg2_type = gcc_func.get_param_type(1);
+                let variable = builder.current_func().new_local(None, arg2_type, "addcarryResult");
+                new_args.push(variable.get_address(None));
+                args = new_args.into();
+            }
+            "__builtin_ia32_vpermt2varqi512_mask"
+            | "__builtin_ia32_vpermt2varqi256_mask"
+            | "__builtin_ia32_vpermt2varqi128_mask"
+            | "__builtin_ia32_vpermt2varhi512_mask"
+            | "__builtin_ia32_vpermt2varhi256_mask"
+            | "__builtin_ia32_vpermt2varhi128_mask" => {
+                let new_args = args.to_vec();
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                args = vec![new_args[1], new_args[0], new_args[2], minus_one].into();
+            }
+            "__builtin_ia32_xrstor"
+            | "__builtin_ia32_xrstor64"
+            | "__builtin_ia32_xsavec"
+            | "__builtin_ia32_xsavec64"
+            | "__builtin_ia32_xsave"
+            | "__builtin_ia32_xsave64"
+            | "__builtin_ia32_xsaveopt"
+            | "__builtin_ia32_xsaveopt64" => {
+                let new_args = args.to_vec();
+                let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32);
+                let arg2 = (new_args[1] << thirty_two) | new_args[2];
+                let arg2_type = gcc_func.get_param_type(1);
+                let arg2 = builder.context.new_cast(None, arg2, arg2_type);
+                args = vec![new_args[0], arg2].into();
+            }
+            // These builtins are sent one more argument than needed.
+            "__builtin_prefetch" => {
+                let mut new_args = args.to_vec();
+                new_args.pop();
+                args = new_args.into();
+            }
+            // The GCC version returns one value of the tuple through a pointer.
+            "__builtin_ia32_rdrand64_step" => {
+                let arg = builder.current_func().new_local(
+                    None,
+                    builder.ulonglong_type,
+                    "return_rdrand_arg",
+                );
+                args = vec![arg.get_address(None)].into();
+            }
+            "__builtin_ia32_cvtqq2pd512_mask"
+            | "__builtin_ia32_cvtqq2ps512_mask"
+            | "__builtin_ia32_cvtuqq2pd512_mask"
+            | "__builtin_ia32_cvtuqq2ps512_mask"
+            | "__builtin_ia32_sqrtph512_mask_round"
+            | "__builtin_ia32_vcvtw2ph512_mask_round"
+            | "__builtin_ia32_vcvtuw2ph512_mask_round"
+            | "__builtin_ia32_vcvtdq2ph512_mask_round"
+            | "__builtin_ia32_vcvtudq2ph512_mask_round"
+            | "__builtin_ia32_vcvtqq2ph512_mask_round"
+            | "__builtin_ia32_vcvtuqq2ph512_mask_round" => {
+                let mut old_args = args.to_vec();
+                let mut new_args = vec![];
+                new_args.push(old_args.swap_remove(0));
+                let arg2_type = gcc_func.get_param_type(1);
+                let vector_type = arg2_type.dyncast_vector().expect("vector type");
+                let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+                let num_units = vector_type.get_num_units();
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
+                new_args.push(first_arg);
+                let arg3_type = gcc_func.get_param_type(2);
+                let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
+                new_args.push(minus_one);
+                new_args.push(old_args.swap_remove(0));
+                args = new_args.into();
+            }
+            "__builtin_ia32_addph512_mask_round"
+            | "__builtin_ia32_subph512_mask_round"
+            | "__builtin_ia32_mulph512_mask_round"
+            | "__builtin_ia32_divph512_mask_round"
+            | "__builtin_ia32_maxph512_mask_round"
+            | "__builtin_ia32_minph512_mask_round" => {
+                let mut new_args = args.to_vec();
+                let last_arg = new_args.pop().expect("last arg");
+
+                let arg3_type = gcc_func.get_param_type(2);
+                let vector_type = arg3_type.dyncast_vector().expect("vector type");
+                let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+                let num_units = vector_type.get_num_units();
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]);
+                new_args.push(first_arg);
+
+                let arg4_type = gcc_func.get_param_type(3);
+                let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+                new_args.push(minus_one);
+                new_args.push(last_arg);
+                args = new_args.into();
+            }
+            // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors.
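+            // The scalar operands are broadcast into full vectors here; the scalar
+            // result is extracted back out in adjust_intrinsic_return_value.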
+            "__builtin_ia32_vfmaddsh3_mask" => {
+                let new_args = args.to_vec();
+                let arg1_type = gcc_func.get_param_type(0);
+                let arg2_type = gcc_func.get_param_type(1);
+                let arg3_type = gcc_func.get_param_type(2);
+                let arg4_type = gcc_func.get_param_type(3);
+                let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 8]);
+                let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 8]);
+                let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 8]);
+                let arg4 = builder.context.new_rvalue_from_int(arg4_type, -1);
+                args = vec![a, b, c, arg4, new_args[3]].into();
+            }
+            "__builtin_ia32_encodekey128_u32" => {
+                let mut new_args = args.to_vec();
+                let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+                let array_type = builder.context.new_array_type(None, m128i, 6);
+                let result = builder.current_func().new_local(None, array_type, "result");
+                new_args.push(result.get_address(None));
+                args = new_args.into();
+            }
+            "__builtin_ia32_encodekey256_u32" => {
+                let mut new_args = args.to_vec();
+                let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+                let array_type = builder.context.new_array_type(None, m128i, 7);
+                let result = builder.current_func().new_local(None, array_type, "result");
+                new_args.push(result.get_address(None));
+                args = new_args.into();
+            }
+            "__builtin_ia32_aesenc128kl_u8"
+            | "__builtin_ia32_aesdec128kl_u8"
+            | "__builtin_ia32_aesenc256kl_u8"
+            | "__builtin_ia32_aesdec256kl_u8" => {
+                let mut new_args = vec![];
+                let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+                let result = builder.current_func().new_local(None, m128i, "result");
+                new_args.push(result.get_address(None));
+                new_args.extend(args.to_vec());
+                args = new_args.into();
+            }
+            "__builtin_ia32_aesencwide128kl_u8"
+            | "__builtin_ia32_aesdecwide128kl_u8"
+            | "__builtin_ia32_aesencwide256kl_u8"
+            | "__builtin_ia32_aesdecwide256kl_u8" => {
+                let mut new_args = vec![];
+
+                let mut old_args = args.to_vec();
+                let handle = old_args.swap_remove(0); // Called __P in GCC.
+                let first_value = old_args.swap_remove(0);
+
+                let element_type = first_value.get_type();
+                let array_type = builder.context.new_array_type(None, element_type, 8);
+                let result = builder.current_func().new_local(None, array_type, "result");
+                new_args.push(result.get_address(None));
+
+                let array = builder.current_func().new_local(None, array_type, "array");
+                let input = builder.context.new_array_constructor(
+                    None,
+                    array_type,
+                    &[
+                        first_value,
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                    ],
+                );
+                builder.llbb().add_assignment(None, array, input);
+                let input_ptr = array.get_address(None);
+                let arg2_type = gcc_func.get_param_type(1);
+                let input_ptr = builder.context.new_cast(None, input_ptr, arg2_type);
+                new_args.push(input_ptr);
+
+                new_args.push(handle);
+                args = new_args.into();
+            }
+            _ => (),
+        }
+    } else {
+        match func_name {
+            "__builtin_ia32_rndscaless_mask_round"
+            | "__builtin_ia32_rndscalesd_mask_round"
+            | "__builtin_ia32_reducesh_mask_round" => {
+                let new_args = args.to_vec();
+                let arg3_type = gcc_func.get_param_type(2);
+                let arg3 = builder.context.new_cast(None, new_args[4], arg3_type);
+                let arg4_type = gcc_func.get_param_type(3);
+                let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type);
+                args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into();
+            }
+            // NOTE: the LLVM intrinsics receive 3 floats, but the GCC builtin requires 3 vectors.
+            // FIXME: the intrinsics like _mm_mask_fmadd_sd should probably directly call the GCC
+            // intrinsic to avoid this.
+            "__builtin_ia32_vfmaddss3_round" => {
+                let new_args = args.to_vec();
+                let arg1_type = gcc_func.get_param_type(0);
+                let arg2_type = gcc_func.get_param_type(1);
+                let arg3_type = gcc_func.get_param_type(2);
+                let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 4]);
+                let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 4]);
+                let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 4]);
+                args = vec![a, b, c, new_args[3]].into();
+            }
+            "__builtin_ia32_vfmaddsd3_round" => {
+                let new_args = args.to_vec();
+                let arg1_type = gcc_func.get_param_type(0);
+                let arg2_type = gcc_func.get_param_type(1);
+                let arg3_type = gcc_func.get_param_type(2);
+                let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 2]);
+                let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 2]);
+                let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]);
+                args = vec![a, b, c, new_args[3]].into();
+            }
+            "__builtin_ia32_ldmxcsr" => {
+                // The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer,
+                // so dereference the pointer.
+                let mut new_args = args.to_vec();
+                let uint_ptr_type = builder.uint_type.make_pointer();
+                let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type);
+                new_args[0] = arg1.dereference(None).to_rvalue();
+                args = new_args.into();
+            }
+            "__builtin_ia32_rcp14sd_mask"
+            | "__builtin_ia32_rcp14ss_mask"
+            | "__builtin_ia32_rsqrt14sd_mask"
+            | "__builtin_ia32_rsqrt14ss_mask" => {
+                let new_args = args.to_vec();
+                args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into();
+            }
+            "__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => {
+                let new_args = args.to_vec();
+                args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into();
+            }
+            "__builtin_ia32_vpshrdv_v8di"
+            | "__builtin_ia32_vpshrdv_v4di"
+            | "__builtin_ia32_vpshrdv_v2di"
+            | "__builtin_ia32_vpshrdv_v16si"
+            | "__builtin_ia32_vpshrdv_v8si"
+            | "__builtin_ia32_vpshrdv_v4si"
+            | "__builtin_ia32_vpshrdv_v32hi"
+            | "__builtin_ia32_vpshrdv_v16hi"
+            | "__builtin_ia32_vpshrdv_v8hi" => {
+                // The first two arguments are reversed compared to LLVM.
+                let new_args = args.to_vec();
+                args = vec![new_args[1], new_args[0], new_args[2]].into();
+            }
+            "__builtin_ia32_rangesd128_mask_round"
+            | "__builtin_ia32_rangess128_mask_round"
+            | "__builtin_ia32_reducesd_mask_round"
+            | "__builtin_ia32_reducess_mask_round" => {
+                let new_args = args.to_vec();
+                args = vec![
+                    new_args[0],
+                    new_args[1],
+                    new_args[4],
+                    new_args[2],
+                    new_args[3],
+                    new_args[5],
+                ]
+                .into();
+            }
+            "__builtin_ia32_rndscalesh_mask_round" => {
+                let new_args = args.to_vec();
+                args = vec![
+                    new_args[0],
+                    new_args[1],
+                    new_args[4],
+                    new_args[2],
+                    new_args[3],
+                    new_args[5],
+                ]
+                .into();
+            }
+            "fma" => {
+                let mut new_args = args.to_vec();
+                new_args[0] = builder.context.new_cast(None, new_args[0], builder.double_type);
+                new_args[1] = builder.context.new_cast(None, new_args[1], builder.double_type);
+                new_args[2] = builder.context.new_cast(None, new_args[2], builder.double_type);
+                args = new_args.into();
+            }
+            "__builtin_ia32_sqrtsh_mask_round"
+            | "__builtin_ia32_vcvtss2sh_mask_round"
+            | "__builtin_ia32_vcvtsd2sh_mask_round"
+            | "__builtin_ia32_vcvtsh2ss_mask_round"
+            | "__builtin_ia32_vcvtsh2sd_mask_round"
+            | "__builtin_ia32_rcpsh_mask"
+            | "__builtin_ia32_rsqrtsh_mask" => {
+                // The first two arguments are reversed, compared to LLVM, so swap them.
+                let mut new_args = args.to_vec();
+                new_args.swap(0, 1);
+                args = new_args.into();
+            }
+            _ => (),
+        }
+    }
+
+    args
+}
+
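+/// Adjusts the value returned by a GCC builtin so that it matches the value expected from the
+/// corresponding LLVM intrinsic (e.g. extracting a scalar from a vector, or assembling a
+/// struct from output pointer arguments).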
+pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+    mut return_value: RValue<'gcc>,
+    func_name: &str,
+    args: &[RValue<'gcc>],
+    args_adjusted: bool,
+    orig_args: &[RValue<'gcc>],
+) -> RValue<'gcc> {
+    match func_name {
+        "__builtin_ia32_vfmaddss3_round"
+        | "__builtin_ia32_vfmaddsd3_round"
+        | "__builtin_ia32_vfmaddsh3_mask" => {
+            #[cfg(feature = "master")]
+            {
+                let zero = builder.context.new_rvalue_zero(builder.int_type);
+                return_value =
+                    builder.context.new_vector_access(None, return_value, zero).to_rvalue();
+            }
+        }
+        "__builtin_ia32_addcarryx_u64"
+        | "__builtin_ia32_sbb_u64"
+        | "__builtin_ia32_addcarryx_u32"
+        | "__builtin_ia32_sbb_u32" => {
+            // Both llvm.x86.addcarry.32 and llvm.x86.addcarryx.u32 point to the same GCC builtin,
+            // but only the former requires adjusting the return value.
+            // These two LLVM intrinsics differ in their argument count, which is why we check
+            // whether the arguments were adjusted.
+            if args_adjusted {
+                let last_arg = args.last().expect("last arg");
+                let field1 = builder.context.new_field(None, builder.u8_type, "carryFlag");
+                let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult");
+                let struct_type =
+                    builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]);
+                return_value = builder.context.new_struct_constructor(
+                    None,
+                    struct_type.as_type(),
+                    None,
+                    &[return_value, last_arg.dereference(None).to_rvalue()],
+                );
+            }
+        }
+        "__builtin_ia32_stmxcsr" => {
+            // The builtin __builtin_ia32_stmxcsr returns a value while llvm.x86.sse.stmxcsr writes
+            // the result to its pointer argument.
+            // Since __builtin_ia32_stmxcsr takes no arguments, we removed the pointer argument
+            // earlier; retrieve the original argument to get the pointer to write the result to.
+            let uint_ptr_type = builder.uint_type.make_pointer();
+            let ptr = builder.context.new_cast(None, orig_args[0], uint_ptr_type);
+            builder.llbb().add_assignment(None, ptr.dereference(None), return_value);
+            // The return value was assigned to the result pointer above. To avoid calling the
+            // builtin twice, overwrite the return value with a dummy value.
+            return_value = builder.context.new_rvalue_zero(builder.int_type);
+        }
+        "__builtin_ia32_rdrand64_step" => {
+            let random_number = args[0].dereference(None).to_rvalue();
+            let success_variable =
+                builder.current_func().new_local(None, return_value.get_type(), "success");
+            builder.llbb().add_assignment(None, success_variable, return_value);
+
+            let field1 = builder.context.new_field(None, random_number.get_type(), "random_number");
+            let field2 = builder.context.new_field(None, return_value.get_type(), "success");
+            let struct_type =
+                builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]);
+            return_value = builder.context.new_struct_constructor(
+                None,
+                struct_type.as_type(),
+                None,
+                &[random_number, success_variable.to_rvalue()],
+            );
+        }
+        "fma" => {
+            let f16_type = builder.context.new_c_type(CType::Float16);
+            return_value = builder.context.new_cast(None, return_value, f16_type);
+        }
+        "__builtin_ia32_encodekey128_u32" => {
+            // The builtin __builtin_ia32_encodekey128_u32 writes the result to its pointer argument
+            // while llvm.x86.encodekey128 returns a value.
+            // We added a result pointer argument earlier, so we now need to assemble the value it
+            // points to into the return value expected by the LLVM intrinsic.
+            let (encode_type, field1, field2) = encode_key_128_type(builder);
+            let result = builder.current_func().new_local(None, encode_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let field2_type = field2.to_rvalue().get_type();
+            let array_type = builder.context.new_array_type(None, field2_type, 6);
+            let ptr = builder.context.new_cast(None, args[2], array_type.make_pointer());
+            let field2_ptr =
+                builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
+            builder.llbb().add_assignment(
+                None,
+                field2_ptr.dereference(None),
+                ptr.dereference(None),
+            );
+            return_value = result.to_rvalue();
+        }
+        "__builtin_ia32_encodekey256_u32" => {
+            // The builtin __builtin_ia32_encodekey256_u32 writes the result to its pointer argument
+            // while llvm.x86.encodekey256 returns a value.
+            // We added a result pointer argument earlier, so we now need to assemble the value it
+            // points to into the return value expected by the LLVM intrinsic.
+            let (encode_type, field1, field2) = encode_key_256_type(builder);
+            let result = builder.current_func().new_local(None, encode_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let field2_type = field2.to_rvalue().get_type();
+            let array_type = builder.context.new_array_type(None, field2_type, 7);
+            let ptr = builder.context.new_cast(None, args[3], array_type.make_pointer());
+            let field2_ptr =
+                builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
+            builder.llbb().add_assignment(
+                None,
+                field2_ptr.dereference(None),
+                ptr.dereference(None),
+            );
+            return_value = result.to_rvalue();
+        }
+        "__builtin_ia32_aesdec128kl_u8"
+        | "__builtin_ia32_aesenc128kl_u8"
+        | "__builtin_ia32_aesdec256kl_u8"
+        | "__builtin_ia32_aesenc256kl_u8" => {
+            // The builtin for aesdec/aesenc writes the result to its pointer argument while
+            // llvm.x86.aesdec128kl returns a value.
+            // We added a result pointer argument earlier, so we now need to assemble the value it
+            // points to into the return value expected by the LLVM intrinsic.
+            let (aes_output_type, field1, field2) = aes_output_type(builder);
+            let result = builder.current_func().new_local(None, aes_output_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let ptr = builder.context.new_cast(
+                None,
+                args[0],
+                field2.to_rvalue().get_type().make_pointer(),
+            );
+            builder.llbb().add_assignment(None, field2, ptr.dereference(None));
+            return_value = result.to_rvalue();
+        }
+        "__builtin_ia32_aesencwide128kl_u8"
+        | "__builtin_ia32_aesdecwide128kl_u8"
+        | "__builtin_ia32_aesencwide256kl_u8"
+        | "__builtin_ia32_aesdecwide256kl_u8" => {
+            // The builtin for aesdecwide/aesencwide writes the result to its pointer argument while
+            // llvm.x86.aesencwide128kl returns a value.
+            // We added a result pointer argument earlier, so we now need to assemble the value it
+            // points to into the return value expected by the LLVM intrinsic.
+            let (aes_output_type, field1, field2) = wide_aes_output_type(builder);
+            let result = builder.current_func().new_local(None, aes_output_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let field2_type = field2.to_rvalue().get_type();
+            let array_type = builder.context.new_array_type(None, field2_type, 8);
+            let ptr = builder.context.new_cast(None, args[0], array_type.make_pointer());
+            let field2_ptr =
+                builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
+            builder.llbb().add_assignment(
+                None,
+                field2_ptr.dereference(None),
+                ptr.dereference(None),
+            );
+            return_value = result.to_rvalue();
+        }
+        _ => (),
+    }
+
+    return_value
+}
+
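+/// Returns whether the type check/cast of argument `index` should be skipped when calling
+/// `func_name`, for GCC builtins whose signature differs from the LLVM intrinsic's.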
+pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
+    // FIXME(antoyo): find a way to refactor in order to avoid this hack.
+    match func_name {
+        // NOTE: these intrinsics have missing parameters before the last one, so ignore the
+        // last argument type check.
+        "__builtin_ia32_maxps512_mask"
+        | "__builtin_ia32_maxpd512_mask"
+        | "__builtin_ia32_minps512_mask"
+        | "__builtin_ia32_minpd512_mask"
+        | "__builtin_ia32_sqrtps512_mask"
+        | "__builtin_ia32_sqrtpd512_mask"
+        | "__builtin_ia32_addps512_mask"
+        | "__builtin_ia32_addpd512_mask"
+        | "__builtin_ia32_subps512_mask"
+        | "__builtin_ia32_subpd512_mask"
+        | "__builtin_ia32_mulps512_mask"
+        | "__builtin_ia32_mulpd512_mask"
+        | "__builtin_ia32_divps512_mask"
+        | "__builtin_ia32_divpd512_mask"
+        | "__builtin_ia32_vfmaddsubps512_mask"
+        | "__builtin_ia32_vfmaddsubpd512_mask"
+        | "__builtin_ia32_cvtdq2ps512_mask"
+        | "__builtin_ia32_cvtudq2ps512_mask" => {
+            if index == args_len - 1 {
+                return true;
+            }
+        }
+        "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
+            if index == 2 || index == 3 {
+                return true;
+            }
+        }
+        "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
+            // Since there are two LLVM intrinsics that map to each of these GCC builtins and only
+            // one of them has a missing parameter before the last one, we check the number of
+            // arguments to distinguish those cases.
+            if args_len == 4 && index == args_len - 1 {
+                return true;
+            }
+        }
+        // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors.
+        "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => return true,
+        "__builtin_ia32_vplzcntd_512_mask"
+        | "__builtin_ia32_vplzcntd_256_mask"
+        | "__builtin_ia32_vplzcntd_128_mask"
+        | "__builtin_ia32_vplzcntq_512_mask"
+        | "__builtin_ia32_vplzcntq_256_mask"
+        | "__builtin_ia32_vplzcntq_128_mask" => {
+            if index == args_len - 1 {
+                return true;
+            }
+        }
+        _ => (),
+    }
+
+    false
+}
+
+#[cfg(not(feature = "master"))]
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+    let gcc_name = match name {
+        "llvm.x86.sse2.pause" => {
+            // NOTE: pause is only a hint, so we use a dummy built-in because target built-ins
+            // are not supported in libgccjit 12.
+            "__builtin_inff"
+        }
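+        // NOTE: target built-ins are not available here either, so trap if this intrinsic is
+        // ever actually called.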
+        "llvm.x86.xgetbv" => "__builtin_trap",
+        _ => unimplemented!("unsupported LLVM intrinsic {}", name),
+    };
+    let func = cx.context.get_builtin_function(gcc_name);
+    cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+    func
+}
+
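+/// Resolves the given LLVM intrinsic name to the corresponding GCC target builtin function,
+/// caching it in `cx.functions`.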
+#[cfg(feature = "master")]
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+    let gcc_name = match name {
+        "llvm.prefetch" => {
+            let gcc_name = "__builtin_prefetch";
+            let func = cx.context.get_builtin_function(gcc_name);
+            cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+            return func;
+        }
+
+        "llvm.aarch64.isb" => {
+            // FIXME: GCC doesn't support __builtin_arm_isb yet; check whether this builtin is an
+            // acceptable substitute.
+            let gcc_name = "__atomic_thread_fence";
+            let func = cx.context.get_builtin_function(gcc_name);
+            cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+            return func;
+        }
+
+        "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv",
+        // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+        "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd",
+        "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
+        "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
+        "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask",
+        "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512_mask",
+        "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512_mask",
+        "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512_mask",
+        "llvm.fma.v16f32" => "__builtin_ia32_vfmaddps512_mask",
+        "llvm.fma.v8f64" => "__builtin_ia32_vfmaddpd512_mask",
+        "llvm.x86.avx512.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+        "llvm.x86.avx512.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+        "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask",
+        "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask",
+        "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask",
+        "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask",
+        "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask",
+        "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask",
+        "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512_mask",
+        "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512_mask",
+        "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512_mask",
+        "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512_mask",
+        "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512_mask",
+        "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512_mask",
+        "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512_mask",
+        "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512_mask",
+        "llvm.x86.avx512.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+        "llvm.x86.avx512.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+        "llvm.x86.avx512.sitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtdq2ps512_mask",
+        "llvm.x86.avx512.uitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtudq2ps512_mask",
+        "llvm.x86.avx512.mask.ucmp.d.512" => "__builtin_ia32_ucmpd512_mask",
+        "llvm.x86.avx512.mask.ucmp.d.256" => "__builtin_ia32_ucmpd256_mask",
+        "llvm.x86.avx512.mask.ucmp.d.128" => "__builtin_ia32_ucmpd128_mask",
+        "llvm.x86.avx512.mask.cmp.d.512" => "__builtin_ia32_cmpd512_mask",
+        "llvm.x86.avx512.mask.cmp.d.256" => "__builtin_ia32_cmpd256_mask",
+        "llvm.x86.avx512.mask.cmp.d.128" => "__builtin_ia32_cmpd128_mask",
+        "llvm.x86.avx512.mask.ucmp.q.512" => "__builtin_ia32_ucmpq512_mask",
+        "llvm.x86.avx512.mask.ucmp.q.256" => "__builtin_ia32_ucmpq256_mask",
+        "llvm.x86.avx512.mask.ucmp.q.128" => "__builtin_ia32_ucmpq128_mask",
+        "llvm.x86.avx512.mask.cmp.q.512" => "__builtin_ia32_cmpq512_mask",
+        "llvm.x86.avx512.mask.cmp.q.256" => "__builtin_ia32_cmpq256_mask",
+        "llvm.x86.avx512.mask.cmp.q.128" => "__builtin_ia32_cmpq128_mask",
+        "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_mask_round",
+        "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_mask_round",
+        "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_mask_round",
+        "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_mask_round",
+        "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_mask_round",
+        "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_mask_round",
+        "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss_mask_round",
+        "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd_mask_round",
+        "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_mask_round",
+        "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_mask_round",
+        "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_mask_round",
+        "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_mask_round",
+        "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_mask_round",
+        "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_mask_round",
+        "llvm.x86.avx512.vfmadd.f32" => "__builtin_ia32_vfmaddss3_round",
+        "llvm.x86.avx512.vfmadd.f64" => "__builtin_ia32_vfmaddsd3_round",
+        "llvm.ceil.v4f64" => "__builtin_ia32_ceilpd256",
+        "llvm.ceil.v8f32" => "__builtin_ia32_ceilps256",
+        "llvm.floor.v4f64" => "__builtin_ia32_floorpd256",
+        "llvm.floor.v8f32" => "__builtin_ia32_floorps256",
+        "llvm.sqrt.v4f64" => "__builtin_ia32_sqrtpd256",
+        "llvm.x86.sse.stmxcsr" => "__builtin_ia32_stmxcsr",
+        "llvm.x86.sse.ldmxcsr" => "__builtin_ia32_ldmxcsr",
+        "llvm.ctpop.v16i32" => "__builtin_ia32_vpopcountd_v16si",
+        "llvm.ctpop.v8i32" => "__builtin_ia32_vpopcountd_v8si",
+        "llvm.ctpop.v4i32" => "__builtin_ia32_vpopcountd_v4si",
+        "llvm.ctpop.v8i64" => "__builtin_ia32_vpopcountq_v8di",
+        "llvm.ctpop.v4i64" => "__builtin_ia32_vpopcountq_v4di",
+        "llvm.ctpop.v2i64" => "__builtin_ia32_vpopcountq_v2di",
+        "llvm.x86.addcarry.64" => "__builtin_ia32_addcarryx_u64",
+        "llvm.x86.subborrow.64" => "__builtin_ia32_sbb_u64",
+        "llvm.floor.v2f64" => "__builtin_ia32_floorpd",
+        "llvm.floor.v4f32" => "__builtin_ia32_floorps",
+        "llvm.ceil.v2f64" => "__builtin_ia32_ceilpd",
+        "llvm.ceil.v4f32" => "__builtin_ia32_ceilps",
+        "llvm.fma.v2f64" => "__builtin_ia32_vfmaddpd",
+        "llvm.fma.v4f64" => "__builtin_ia32_vfmaddpd256",
+        "llvm.fma.v4f32" => "__builtin_ia32_vfmaddps",
+        "llvm.fma.v8f32" => "__builtin_ia32_vfmaddps256",
+        "llvm.ctlz.v16i32" => "__builtin_ia32_vplzcntd_512_mask",
+        "llvm.ctlz.v8i32" => "__builtin_ia32_vplzcntd_256_mask",
+        "llvm.ctlz.v4i32" => "__builtin_ia32_vplzcntd_128_mask",
+        "llvm.ctlz.v8i64" => "__builtin_ia32_vplzcntq_512_mask",
+        "llvm.ctlz.v4i64" => "__builtin_ia32_vplzcntq_256_mask",
+        "llvm.ctlz.v2i64" => "__builtin_ia32_vplzcntq_128_mask",
+        "llvm.ctpop.v32i16" => "__builtin_ia32_vpopcountw_v32hi",
+        "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask",
+        "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask",
+        "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask",
+        "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask",
+        "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask",
+        "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask",
+        "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask",
+        "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask",
+        "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask",
+        "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask",
+        "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask",
+        "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask",
+        "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask",
+        "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask",
+        "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask",
+        "llvm.ctpop.v16i16" => "__builtin_ia32_vpopcountw_v16hi",
+        "llvm.ctpop.v8i16" => "__builtin_ia32_vpopcountw_v8hi",
+        "llvm.ctpop.v64i8" => "__builtin_ia32_vpopcountb_v64qi",
+        "llvm.ctpop.v32i8" => "__builtin_ia32_vpopcountb_v32qi",
+        "llvm.ctpop.v16i8" => "__builtin_ia32_vpopcountb_v16qi",
+        "llvm.x86.avx512.mask.vpshufbitqmb.512" => "__builtin_ia32_vpshufbitqmb512_mask",
+        "llvm.x86.avx512.mask.vpshufbitqmb.256" => "__builtin_ia32_vpshufbitqmb256_mask",
+        "llvm.x86.avx512.mask.vpshufbitqmb.128" => "__builtin_ia32_vpshufbitqmb128_mask",
+        "llvm.x86.avx512.mask.ucmp.w.512" => "__builtin_ia32_ucmpw512_mask",
+        "llvm.x86.avx512.mask.ucmp.w.256" => "__builtin_ia32_ucmpw256_mask",
+        "llvm.x86.avx512.mask.ucmp.w.128" => "__builtin_ia32_ucmpw128_mask",
+        "llvm.x86.avx512.mask.ucmp.b.512" => "__builtin_ia32_ucmpb512_mask",
+        "llvm.x86.avx512.mask.ucmp.b.256" => "__builtin_ia32_ucmpb256_mask",
+        "llvm.x86.avx512.mask.ucmp.b.128" => "__builtin_ia32_ucmpb128_mask",
+        "llvm.x86.avx512.mask.cmp.w.512" => "__builtin_ia32_cmpw512_mask",
+        "llvm.x86.avx512.mask.cmp.w.256" => "__builtin_ia32_cmpw256_mask",
+        "llvm.x86.avx512.mask.cmp.w.128" => "__builtin_ia32_cmpw128_mask",
+        "llvm.x86.avx512.mask.cmp.b.512" => "__builtin_ia32_cmpb512_mask",
+        "llvm.x86.avx512.mask.cmp.b.256" => "__builtin_ia32_cmpb256_mask",
+        "llvm.x86.avx512.mask.cmp.b.128" => "__builtin_ia32_cmpb128_mask",
+        "llvm.x86.xrstor" => "__builtin_ia32_xrstor",
+        "llvm.x86.xrstor64" => "__builtin_ia32_xrstor64",
+        "llvm.x86.xsavec" => "__builtin_ia32_xsavec",
+        "llvm.x86.xsavec64" => "__builtin_ia32_xsavec64",
+        "llvm.x86.addcarry.32" => "__builtin_ia32_addcarryx_u32",
+        "llvm.x86.subborrow.32" => "__builtin_ia32_sbb_u32",
+        "llvm.x86.avx512.mask.compress.store.w.512" => "__builtin_ia32_compressstoreuhi512_mask",
+        "llvm.x86.avx512.mask.compress.store.w.256" => "__builtin_ia32_compressstoreuhi256_mask",
+        "llvm.x86.avx512.mask.compress.store.w.128" => "__builtin_ia32_compressstoreuhi128_mask",
+        "llvm.x86.avx512.mask.compress.store.b.512" => "__builtin_ia32_compressstoreuqi512_mask",
+        "llvm.x86.avx512.mask.compress.store.b.256" => "__builtin_ia32_compressstoreuqi256_mask",
+        "llvm.x86.avx512.mask.compress.store.b.128" => "__builtin_ia32_compressstoreuqi128_mask",
+        "llvm.x86.avx512.mask.compress.w.512" => "__builtin_ia32_compresshi512_mask",
+        "llvm.x86.avx512.mask.compress.w.256" => "__builtin_ia32_compresshi256_mask",
+        "llvm.x86.avx512.mask.compress.w.128" => "__builtin_ia32_compresshi128_mask",
+        "llvm.x86.avx512.mask.compress.b.512" => "__builtin_ia32_compressqi512_mask",
+        "llvm.x86.avx512.mask.compress.b.256" => "__builtin_ia32_compressqi256_mask",
+        "llvm.x86.avx512.mask.compress.b.128" => "__builtin_ia32_compressqi128_mask",
+        "llvm.x86.avx512.mask.expand.w.512" => "__builtin_ia32_expandhi512_mask",
+        "llvm.x86.avx512.mask.expand.w.256" => "__builtin_ia32_expandhi256_mask",
+        "llvm.x86.avx512.mask.expand.w.128" => "__builtin_ia32_expandhi128_mask",
+        "llvm.x86.avx512.mask.expand.b.512" => "__builtin_ia32_expandqi512_mask",
+        "llvm.x86.avx512.mask.expand.b.256" => "__builtin_ia32_expandqi256_mask",
+        "llvm.x86.avx512.mask.expand.b.128" => "__builtin_ia32_expandqi128_mask",
+        "llvm.fshl.v8i64" => "__builtin_ia32_vpshldv_v8di",
+        "llvm.fshl.v4i64" => "__builtin_ia32_vpshldv_v4di",
+        "llvm.fshl.v2i64" => "__builtin_ia32_vpshldv_v2di",
+        "llvm.fshl.v16i32" => "__builtin_ia32_vpshldv_v16si",
+        "llvm.fshl.v8i32" => "__builtin_ia32_vpshldv_v8si",
+        "llvm.fshl.v4i32" => "__builtin_ia32_vpshldv_v4si",
+        "llvm.fshl.v32i16" => "__builtin_ia32_vpshldv_v32hi",
+        "llvm.fshl.v16i16" => "__builtin_ia32_vpshldv_v16hi",
+        "llvm.fshl.v8i16" => "__builtin_ia32_vpshldv_v8hi",
+        "llvm.fshr.v8i64" => "__builtin_ia32_vpshrdv_v8di",
+        "llvm.fshr.v4i64" => "__builtin_ia32_vpshrdv_v4di",
+        "llvm.fshr.v2i64" => "__builtin_ia32_vpshrdv_v2di",
+        "llvm.fshr.v16i32" => "__builtin_ia32_vpshrdv_v16si",
+        "llvm.fshr.v8i32" => "__builtin_ia32_vpshrdv_v8si",
+        "llvm.fshr.v4i32" => "__builtin_ia32_vpshrdv_v4si",
+        "llvm.fshr.v32i16" => "__builtin_ia32_vpshrdv_v32hi",
+        "llvm.fshr.v16i16" => "__builtin_ia32_vpshrdv_v16hi",
+        "llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi",
+        "llvm.x86.rdrand.64" => "__builtin_ia32_rdrand64_step",
+
+        // For the following intrinsics, the doc above points to builtins unknown to GCC, so
+        // override them:
+        "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si",
+        "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gathersiv8si",
+        "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gathersiv4sf",
+        "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gathersiv8sf",
+        "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gathersiv2di",
+        "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gathersiv4di",
+        "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gathersiv2df",
+        "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gathersiv4df",
+        "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherdiv4si",
+        "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherdiv4si256",
+        "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherdiv4sf",
+        "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherdiv4sf256",
+        "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherdiv2di",
+        "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherdiv4di",
+        "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherdiv2df",
+        "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherdiv4df",
+        "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512_mask",
+        "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512_mask",
+        "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512_mask",
+        "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512_mask",
+        "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512_mask",
+        "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512_mask",
+        "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512_mask",
+        "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512_mask",
+        "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512_mask",
+        "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512_mask",
+        "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256_mask",
+        "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128_mask",
+        "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512_mask",
+        "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512_mask",
+        "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256_mask",
+        "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128_mask",
+        "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si_mask",
+        "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di_mask",
+        "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256_mask",
+        "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128_mask",
+        "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si_mask",
+        "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si_mask",
+        "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di_mask",
+        "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di_mask",
+        "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512_mask",
+        "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask",
+        "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask",
+        "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512_mask",
+        "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256_mask",
+        "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask",
+        "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512_mask",
+        "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256_mask",
+        "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask",
+        "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask",
+        "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask",
+        "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask",
+        "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask",
+        "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask",
+        "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask",
+        "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask",
+        "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask",
+        "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask",
+        "llvm.x86.avx512.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256_mask",
+        "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask",
+        "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_mask_round",
+        "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_mask_round",
+        "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_mask_round",
+        "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_mask_round",
+        "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_mask_round",
+        "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_mask_round",
+        "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_mask_round",
+        "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_mask_round",
+        "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_mask_round",
+        "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_mask_round",
+        "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_mask_round",
+        "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_mask_round",
+        "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask_round",
+        "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask_round",
+        "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask_round",
+        "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask_round",
+        "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_mask_round",
+        "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_mask_round",
+        "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_mask_round",
+        "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_mask_round",
+        "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_mask_round",
+        "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_mask_round",
+        "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_mask_round",
+        "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_mask_round",
+        "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_mask_round",
+        "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_vaesenc_v32qi",
+        "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_vaesenclast_v32qi",
+        "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_vaesdec_v32qi",
+        "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_vaesdeclast_v32qi",
+        "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_vaesenc_v64qi",
+        "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_vaesenclast_v64qi",
+        "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_vaesdec_v64qi",
+        "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_vaesdeclast_v64qi",
+        "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_v8bf",
+        "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_v16bf",
+        "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_v32bf",
+        "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_v8sf",
+        "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_v16sf",
+        "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_v4sf",
+        "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_v8sf",
+        "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_v16sf",
+        "llvm.x86.pclmulqdq.512" => "__builtin_ia32_vpclmulqdq_v8di",
+        "llvm.x86.pclmulqdq.256" => "__builtin_ia32_vpclmulqdq_v4di",
+        "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask",
+        "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask",
+        "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask",
+        "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask",
+        "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask",
+        "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512_mask",
+        "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512_mask",
+        "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512_mask",
+        "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512_mask",
+        "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512_mask",
+        "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512_mask",
+        "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512_mask",
+        "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512_mask",
+        "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi_mask",
+        "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi_mask",
+        "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi_mask",
+        "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512_mask",
+        "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512_mask",
+        "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi_mask",
+        "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi_mask",
+        "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi_mask",
+        "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512_mask",
+        "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512_mask",
+        "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi_mask",
+        "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi_mask",
+        "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi_mask",
+        "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask",
+        "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask",
+        "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask",
+        "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask",
+        "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask",
+        "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask",
+        "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512_mask",
+        "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask",
+        "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask",
+        "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask",
+        "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask",
+        "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask",
+        "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256",
+        "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256",
+        "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128",
+        "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd_v16si",
+        "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd_v8si",
+        "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd_v4si",
+        "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds_v16si",
+        "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds_v8si",
+        "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds_v4si",
+        "llvm.x86.avx512.vpdpbusd.512" => "__builtin_ia32_vpdpbusd_v16si",
+        "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd_v8si",
+        "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd_v4si",
+        "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds_v16si",
+        "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds_v8si",
+        "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds_v4si",
+        "llvm.x86.xsave" => "__builtin_ia32_xsave",
+        "llvm.x86.xsave64" => "__builtin_ia32_xsave64",
+        "llvm.x86.xsaveopt" => "__builtin_ia32_xsaveopt",
+        "llvm.x86.xsaveopt64" => "__builtin_ia32_xsaveopt64",
+        "llvm.x86.avx512.mask.loadu.w.512" => "__builtin_ia32_loaddquhi512_mask",
+        "llvm.x86.avx512.mask.loadu.b.512" => "__builtin_ia32_loaddquqi512_mask",
+        "llvm.x86.avx512.mask.loadu.w.256" => "__builtin_ia32_loaddquhi256_mask",
+        "llvm.x86.avx512.mask.loadu.b.256" => "__builtin_ia32_loaddquqi256_mask",
+        "llvm.x86.avx512.mask.loadu.w.128" => "__builtin_ia32_loaddquhi128_mask",
+        "llvm.x86.avx512.mask.loadu.b.128" => "__builtin_ia32_loaddquqi128_mask",
+        "llvm.x86.avx512.mask.storeu.w.512" => "__builtin_ia32_storedquhi512_mask",
+        "llvm.x86.avx512.mask.storeu.b.512" => "__builtin_ia32_storedquqi512_mask",
+        "llvm.x86.avx512.mask.storeu.w.256" => "__builtin_ia32_storedquhi256_mask",
+        "llvm.x86.avx512.mask.storeu.b.256" => "__builtin_ia32_storedquqi256_mask",
+        "llvm.x86.avx512.mask.storeu.w.128" => "__builtin_ia32_storedquhi128_mask",
+        "llvm.x86.avx512.mask.storeu.b.128" => "__builtin_ia32_storedquqi128_mask",
+        "llvm.x86.avx512.mask.expand.load.w.512" => "__builtin_ia32_expandloadhi512_mask",
+        "llvm.x86.avx512.mask.expand.load.w.256" => "__builtin_ia32_expandloadhi256_mask",
+        "llvm.x86.avx512.mask.expand.load.w.128" => "__builtin_ia32_expandloadhi128_mask",
+        "llvm.x86.avx512.mask.expand.load.b.512" => "__builtin_ia32_expandloadqi512_mask",
+        "llvm.x86.avx512.mask.expand.load.b.256" => "__builtin_ia32_expandloadqi256_mask",
+        "llvm.x86.avx512.mask.expand.load.b.128" => "__builtin_ia32_expandloadqi128_mask",
+        "llvm.x86.avx512.sitofp.round.v8f64.v8i64" => "__builtin_ia32_cvtqq2pd512_mask",
+        "llvm.x86.avx512.sitofp.round.v2f64.v2i64" => "__builtin_ia32_cvtqq2pd128_mask",
+        "llvm.x86.avx512.sitofp.round.v4f64.v4i64" => "__builtin_ia32_cvtqq2pd256_mask",
+        "llvm.x86.avx512.sitofp.round.v8f32.v8i64" => "__builtin_ia32_cvtqq2ps512_mask",
+        "llvm.x86.avx512.sitofp.round.v4f32.v4i64" => "__builtin_ia32_cvtqq2ps256_mask",
+        "llvm.x86.avx512.uitofp.round.v8f64.v8u64" => "__builtin_ia32_cvtuqq2pd512_mask",
+        "llvm.x86.avx512.uitofp.round.v2f64.v2u64" => "__builtin_ia32_cvtuqq2pd128_mask",
+        "llvm.x86.avx512.uitofp.round.v4f64.v4u64" => "__builtin_ia32_cvtuqq2pd256_mask",
+        "llvm.x86.avx512.uitofp.round.v8f32.v8u64" => "__builtin_ia32_cvtuqq2ps512_mask",
+        "llvm.x86.avx512.uitofp.round.v4f32.v4u64" => "__builtin_ia32_cvtuqq2ps256_mask",
+        "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask_round",
+        "llvm.x86.avx512.mask.reduce.ps.512" => "__builtin_ia32_reduceps512_mask_round",
+        "llvm.x86.avx512.mask.reduce.sd" => "__builtin_ia32_reducesd_mask_round",
+        "llvm.x86.avx512.mask.reduce.ss" => "__builtin_ia32_reducess_mask_round",
+        "llvm.x86.avx512.mask.loadu.d.256" => "__builtin_ia32_loaddqusi256_mask",
+        "llvm.x86.avx512.mask.loadu.q.256" => "__builtin_ia32_loaddqudi256_mask",
+        "llvm.x86.avx512.mask.loadu.ps.256" => "__builtin_ia32_loadups256_mask",
+        "llvm.x86.avx512.mask.loadu.pd.256" => "__builtin_ia32_loadupd256_mask",
+        "llvm.x86.avx512.mask.loadu.d.128" => "__builtin_ia32_loaddqusi128_mask",
+        "llvm.x86.avx512.mask.loadu.q.128" => "__builtin_ia32_loaddqudi128_mask",
+        "llvm.x86.avx512.mask.loadu.ps.128" => "__builtin_ia32_loadups128_mask",
+        "llvm.x86.avx512.mask.loadu.pd.128" => "__builtin_ia32_loadupd128_mask",
+        "llvm.x86.avx512.mask.load.d.512" => "__builtin_ia32_movdqa32load512_mask",
+        "llvm.x86.avx512.mask.load.q.512" => "__builtin_ia32_movdqa64load512_mask",
+        "llvm.x86.avx512.mask.load.ps.512" => "__builtin_ia32_loadaps512_mask",
+        "llvm.x86.avx512.mask.load.pd.512" => "__builtin_ia32_loadapd512_mask",
+        "llvm.x86.avx512.mask.load.d.256" => "__builtin_ia32_movdqa32load256_mask",
+        "llvm.x86.avx512.mask.load.q.256" => "__builtin_ia32_movdqa64load256_mask",
+        "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask_round",
+        "llvm.x86.avx512fp16.vcomi.sh" => "__builtin_ia32_cmpsh_mask_round",
+        "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512_mask_round",
+        "llvm.x86.avx512fp16.sub.ph.512" => "__builtin_ia32_subph512_mask_round",
+        "llvm.x86.avx512fp16.mul.ph.512" => "__builtin_ia32_mulph512_mask_round",
+        "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512_mask_round",
+        "llvm.x86.avx512fp16.mask.vfmul.cph.512" => "__builtin_ia32_vfmulcph512_mask_round",
+        "llvm.x86.avx512fp16.mask.vfmul.csh" => "__builtin_ia32_vfmulcsh_mask_round",
+        "llvm.x86.avx512fp16.mask.vfcmul.cph.512" => "__builtin_ia32_vfcmulcph512_mask_round",
+        "llvm.x86.avx512fp16.mask.vfcmul.csh" => "__builtin_ia32_vfcmulcsh_mask_round",
+        "llvm.x86.avx512fp16.mask.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_mask3_round",
+        "llvm.x86.avx512fp16.maskz.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_maskz_round",
+        "llvm.x86.avx512fp16.mask.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_mask3_round",
+        "llvm.x86.avx512fp16.maskz.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_maskz_round",
+        "llvm.x86.avx512fp16.mask.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_mask3_round",
+        "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_maskz_round",
+        "llvm.x86.avx512fp16.mask.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_mask3_round",
+        "llvm.x86.avx512fp16.maskz.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_maskz_round",
+        "llvm.x86.avx512fp16.vfmadd.ph.512" => "__builtin_ia32_vfmaddph512_mask",
+        "llvm.x86.avx512fp16.vcvtsi642sh" => "__builtin_ia32_vcvtsi2sh64_round",
+        "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi2sh64_round",
+        "llvm.x86.avx512fp16.vcvtsh2si64" => "__builtin_ia32_vcvtsh2si64_round",
+        "llvm.x86.avx512fp16.vcvtsh2usi64" => "__builtin_ia32_vcvtsh2usi64_round",
+        "llvm.x86.avx512fp16.vcvttsh2si64" => "__builtin_ia32_vcvttsh2si64_round",
+        "llvm.x86.avx512fp16.vcvttsh2usi64" => "__builtin_ia32_vcvttsh2usi64_round",
+        "llvm.x86.avx512.mask.load.ps.256" => "__builtin_ia32_loadaps256_mask",
+        "llvm.x86.avx512.mask.load.pd.256" => "__builtin_ia32_loadapd256_mask",
+        "llvm.x86.avx512.mask.load.d.128" => "__builtin_ia32_movdqa32load128_mask",
+        "llvm.x86.avx512.mask.load.q.128" => "__builtin_ia32_movdqa64load128_mask",
+        "llvm.x86.avx512.mask.load.ps.128" => "__builtin_ia32_loadaps128_mask",
+        "llvm.x86.avx512.mask.load.pd.128" => "__builtin_ia32_loadapd128_mask",
+        "llvm.x86.avx512.mask.storeu.d.256" => "__builtin_ia32_storedqusi256_mask",
+        "llvm.x86.avx512.mask.storeu.q.256" => "__builtin_ia32_storedqudi256_mask",
+        "llvm.x86.avx512.mask.storeu.ps.256" => "__builtin_ia32_storeups256_mask",
+        "llvm.x86.avx512.mask.storeu.pd.256" => "__builtin_ia32_storeupd256_mask",
+        "llvm.x86.avx512.mask.storeu.d.128" => "__builtin_ia32_storedqusi128_mask",
+        "llvm.x86.avx512.mask.storeu.q.128" => "__builtin_ia32_storedqudi128_mask",
+        "llvm.x86.avx512.mask.storeu.ps.128" => "__builtin_ia32_storeups128_mask",
+        "llvm.x86.avx512.mask.storeu.pd.128" => "__builtin_ia32_storeupd128_mask",
+        "llvm.x86.avx512.mask.store.d.512" => "__builtin_ia32_movdqa32store512_mask",
+        "llvm.x86.avx512.mask.store.q.512" => "__builtin_ia32_movdqa64store512_mask",
+        "llvm.x86.avx512.mask.store.ps.512" => "__builtin_ia32_storeaps512_mask",
+        "llvm.x86.avx512.mask.store.pd.512" => "__builtin_ia32_storeapd512_mask",
+        "llvm.x86.avx512.mask.store.d.256" => "__builtin_ia32_movdqa32store256_mask",
+        "llvm.x86.avx512.mask.store.q.256" => "__builtin_ia32_movdqa64store256_mask",
+        "llvm.x86.avx512.mask.store.ps.256" => "__builtin_ia32_storeaps256_mask",
+        "llvm.x86.avx512.mask.store.pd.256" => "__builtin_ia32_storeapd256_mask",
+        "llvm.x86.avx512.mask.store.d.128" => "__builtin_ia32_movdqa32store128_mask",
+        "llvm.x86.avx512.mask.store.q.128" => "__builtin_ia32_movdqa64store128_mask",
+        "llvm.x86.avx512.mask.store.ps.128" => "__builtin_ia32_storeaps128_mask",
+        "llvm.x86.avx512.mask.store.pd.128" => "__builtin_ia32_storeapd128_mask",
+        "llvm.x86.avx512fp16.vfmadd.f16" => "__builtin_ia32_vfmaddsh3_mask",
+        "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph128_mask",
+        "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256_mask",
+        "llvm.x86.avx512fp16.vfmaddsub.ph.512" => "__builtin_ia32_vfmaddsubph512_mask",
+        "llvm.x86.avx512fp16.sqrt.ph.512" => "__builtin_ia32_sqrtph512_mask_round",
+        "llvm.x86.avx512fp16.mask.sqrt.sh" => "__builtin_ia32_sqrtsh_mask_round",
+        "llvm.x86.avx512fp16.max.ph.128" => "__builtin_ia32_maxph128_mask",
+        "llvm.x86.avx512fp16.max.ph.256" => "__builtin_ia32_maxph256_mask",
+        "llvm.x86.avx512fp16.max.ph.512" => "__builtin_ia32_maxph512_mask_round",
+        "llvm.x86.avx512fp16.min.ph.128" => "__builtin_ia32_minph128_mask",
+        "llvm.x86.avx512fp16.min.ph.256" => "__builtin_ia32_minph256_mask",
+        "llvm.x86.avx512fp16.min.ph.512" => "__builtin_ia32_minph512_mask_round",
+        "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh_mask_round",
+        "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph128_mask",
+        "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph256_mask",
+        "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph512_mask_round",
+        "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask_round",
+        "llvm.x86.avx512fp16.mask.reduce.ph.512" => "__builtin_ia32_reduceph512_mask_round",
+        "llvm.x86.avx512fp16.mask.reduce.sh" => "__builtin_ia32_reducesh_mask_round",
+        "llvm.x86.avx512.sitofp.round.v8f16.v8i16" => "__builtin_ia32_vcvtw2ph128_mask",
+        "llvm.x86.avx512.sitofp.round.v16f16.v16i16" => "__builtin_ia32_vcvtw2ph256_mask",
+        "llvm.x86.avx512.sitofp.round.v32f16.v32i16" => "__builtin_ia32_vcvtw2ph512_mask_round",
+        "llvm.x86.avx512.uitofp.round.v8f16.v8u16" => "__builtin_ia32_vcvtuw2ph128_mask",
+        "llvm.x86.avx512.uitofp.round.v16f16.v16u16" => "__builtin_ia32_vcvtuw2ph256_mask",
+        "llvm.x86.avx512.uitofp.round.v32f16.v32u16" => "__builtin_ia32_vcvtuw2ph512_mask_round",
+        "llvm.x86.avx512.sitofp.round.v8f16.v8i32" => "__builtin_ia32_vcvtdq2ph256_mask",
+        "llvm.x86.avx512.sitofp.round.v16f16.v16i32" => "__builtin_ia32_vcvtdq2ph512_mask_round",
+        "llvm.x86.avx512fp16.vcvtsi2sh" => "__builtin_ia32_vcvtsi2sh32_round",
+        "llvm.x86.avx512.uitofp.round.v8f16.v8u32" => "__builtin_ia32_vcvtudq2ph256_mask",
+        "llvm.x86.avx512.uitofp.round.v16f16.v16u32" => "__builtin_ia32_vcvtudq2ph512_mask_round",
+        "llvm.x86.avx512fp16.vcvtusi2sh" => "__builtin_ia32_vcvtusi2sh32_round",
+        "llvm.x86.avx512.sitofp.round.v8f16.v8i64" => "__builtin_ia32_vcvtqq2ph512_mask_round",
+        "llvm.x86.avx512.uitofp.round.v8f16.v8u64" => "__builtin_ia32_vcvtuqq2ph512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtpd2ph.512" => "__builtin_ia32_vcvtpd2ph512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2uw.512" => "__builtin_ia32_vcvtph2uw512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvttph2w.512" => "__builtin_ia32_vcvttph2w512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvttph2uw.512" => "__builtin_ia32_vcvttph2uw512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2dq.512" => "__builtin_ia32_vcvtph2dq512_mask_round",
+        "llvm.x86.avx512fp16.vcvtsh2si32" => "__builtin_ia32_vcvtsh2si32_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2udq.512" => "__builtin_ia32_vcvtph2udq512_mask_round",
+        "llvm.x86.avx512fp16.vcvtsh2usi32" => "__builtin_ia32_vcvtsh2usi32_round",
+        "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask_round",
+        "llvm.x86.avx512fp16.vcvttsh2si32" => "__builtin_ia32_vcvttsh2si32_round",
+        "llvm.x86.avx512fp16.mask.vcvttph2udq.512" => "__builtin_ia32_vcvttph2udq512_mask_round",
+        "llvm.x86.avx512fp16.vcvttsh2usi32" => "__builtin_ia32_vcvttsh2usi32_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2qq.512" => "__builtin_ia32_vcvtph2qq512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2uqq.512" => "__builtin_ia32_vcvtph2uqq512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvttph2qq.512" => "__builtin_ia32_vcvttph2qq512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvttph2uqq.512" => "__builtin_ia32_vcvttph2uqq512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2psx.512" => "__builtin_ia32_vcvtph2psx512_mask_round",
+        "llvm.x86.avx512fp16.mask.vcvtph2pd.512" => "__builtin_ia32_vcvtph2pd512_mask_round",
+        "llvm.x86.avx512fp16.mask.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_mask3",
+        "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask3",
+        "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask3",
+        "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask3",
+        "llvm.x86.encodekey128" => "__builtin_ia32_encodekey128_u32",
+        "llvm.x86.encodekey256" => "__builtin_ia32_encodekey256_u32",
+        "llvm.x86.aesenc128kl" => "__builtin_ia32_aesenc128kl_u8",
+        "llvm.x86.aesdec128kl" => "__builtin_ia32_aesdec128kl_u8",
+        "llvm.x86.aesenc256kl" => "__builtin_ia32_aesenc256kl_u8",
+        "llvm.x86.aesdec256kl" => "__builtin_ia32_aesdec256kl_u8",
+        "llvm.x86.aesencwide128kl" => "__builtin_ia32_aesencwide128kl_u8",
+        "llvm.x86.aesdecwide128kl" => "__builtin_ia32_aesdecwide128kl_u8",
+        "llvm.x86.aesencwide256kl" => "__builtin_ia32_aesencwide256kl_u8",
+        "llvm.x86.aesdecwide256kl" => "__builtin_ia32_aesdecwide256kl_u8",
+
+        // TODO: support the tile builtins:
+        "llvm.x86.ldtilecfg" => "__builtin_trap",
+        "llvm.x86.sttilecfg" => "__builtin_trap",
+        "llvm.x86.tileloadd64" => "__builtin_trap",
+        "llvm.x86.tilerelease" => "__builtin_trap",
+        "llvm.x86.tilestored64" => "__builtin_trap",
+        "llvm.x86.tileloaddt164" => "__builtin_trap",
+        "llvm.x86.tilezero" => "__builtin_trap",
+        "llvm.x86.tdpbf16ps" => "__builtin_trap",
+        "llvm.x86.tdpbssd" => "__builtin_trap",
+        "llvm.x86.tdpbsud" => "__builtin_trap",
+        "llvm.x86.tdpbusd" => "__builtin_trap",
+        "llvm.x86.tdpbuud" => "__builtin_trap",
+        "llvm.x86.tdpfp16ps" => "__builtin_trap",
+        "llvm.x86.tcmmimfp16ps" => "__builtin_trap",
+        "llvm.x86.tcmmrlfp16ps" => "__builtin_trap",
+
+        // NOTE: this file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py
+        _ => include!("archs.rs"),
+    };
+
+    let func = cx.context.get_target_builtin_function(gcc_name);
+    cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+    func
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
new file mode 100644
index 00000000000..d22f4229e23
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -0,0 +1,1292 @@
+pub mod llvm;
+mod simd;
+
+#[cfg(feature = "master")]
+use std::iter;
+
+#[cfg(feature = "master")]
+use gccjit::FunctionType;
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
+#[cfg(feature = "master")]
+use rustc_abi::ExternAbi;
+use rustc_abi::{BackendRepr, HasDataLayout};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::common::IntPredicate;
+use rustc_codegen_ssa::errors::InvalidMonomorphization;
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
+#[cfg(feature = "master")]
+use rustc_codegen_ssa::traits::MiscCodegenMethods;
+use rustc_codegen_ssa::traits::{
+    ArgAbiBuilderMethods, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods,
+    IntrinsicCallBuilderMethods,
+};
+use rustc_middle::bug;
+#[cfg(feature = "master")]
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf};
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_span::{Span, Symbol, sym};
+use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
+use rustc_target::spec::PanicStrategy;
+
+#[cfg(feature = "master")]
+use crate::abi::FnAbiGccExt;
+use crate::abi::GccType;
+use crate::builder::Builder;
+use crate::common::{SignType, TypeReflection};
+use crate::context::CodegenCx;
+use crate::intrinsic::simd::generic_simd_intrinsic;
+use crate::type_of::LayoutGccExt;
+
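+/// Maps a "simple" intrinsic (mostly libm-style math functions) to the name of
+/// an equivalent GCC builtin, returning `None` when no direct mapping exists.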
+fn get_simple_intrinsic<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    name: Symbol,
+) -> Option<Function<'gcc>> {
+    let gcc_name = match name {
+        sym::sqrtf32 => "sqrtf",
+        sym::sqrtf64 => "sqrt",
+        sym::powif32 => "__builtin_powif",
+        sym::powif64 => "__builtin_powi",
+        sym::sinf32 => "sinf",
+        sym::sinf64 => "sin",
+        sym::cosf32 => "cosf",
+        sym::cosf64 => "cos",
+        sym::powf32 => "powf",
+        sym::powf64 => "pow",
+        sym::expf32 => "expf",
+        sym::expf64 => "exp",
+        sym::exp2f32 => "exp2f",
+        sym::exp2f64 => "exp2",
+        sym::logf32 => "logf",
+        sym::logf64 => "log",
+        sym::log10f32 => "log10f",
+        sym::log10f64 => "log10",
+        sym::log2f32 => "log2f",
+        sym::log2f64 => "log2",
+        sym::fmaf32 => "fmaf",
+        sym::fmaf64 => "fma",
+        // FIXME: calling `fma` from libc without FMA target feature uses expensive software emulation
+        sym::fmuladdf32 => "fmaf", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f32
+        sym::fmuladdf64 => "fma",  // TODO: use gcc intrinsic analogous to llvm.fmuladd.f64
+        sym::fabsf32 => "fabsf",
+        sym::fabsf64 => "fabs",
+        sym::minnumf32 => "fminf",
+        sym::minnumf64 => "fmin",
+        sym::maxnumf32 => "fmaxf",
+        sym::maxnumf64 => "fmax",
+        sym::copysignf32 => "copysignf",
+        sym::copysignf64 => "copysign",
+        sym::copysignf128 => "copysignl",
+        sym::floorf32 => "floorf",
+        sym::floorf64 => "floor",
+        sym::ceilf32 => "ceilf",
+        sym::ceilf64 => "ceil",
+        sym::truncf32 => "truncf",
+        sym::truncf64 => "trunc",
+        // We match the LLVM backend and lower this to `rint`.
+        sym::round_ties_even_f32 => "rintf",
+        sym::round_ties_even_f64 => "rint",
+        sym::roundf32 => "roundf",
+        sym::roundf64 => "round",
+        sym::abort => "abort",
+        _ => return None,
+    };
+    Some(cx.context.get_builtin_function(gcc_name))
+}
+
+impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn codegen_intrinsic_call(
+        &mut self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[OperandRef<'tcx, RValue<'gcc>>],
+        llresult: RValue<'gcc>,
+        span: Span,
+    ) -> Result<(), Instance<'tcx>> {
+        let tcx = self.tcx;
+        let callee_ty = instance.ty(tcx, self.typing_env());
+
+        let (def_id, fn_args) = match *callee_ty.kind() {
+            ty::FnDef(def_id, fn_args) => (def_id, fn_args),
+            _ => bug!("expected fn item type, found {}", callee_ty),
+        };
+
+        let sig = callee_ty.fn_sig(tcx);
+        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
+        let arg_tys = sig.inputs();
+        let ret_ty = sig.output();
+        let name = tcx.item_name(def_id);
+        let name_str = name.as_str();
+
+        let llret_ty = self.layout_of(ret_ty).gcc_type(self);
+        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+        let simple = get_simple_intrinsic(self, name);
+
+        // FIXME(tempdragon): Re-enable `clippy::suspicious_else_formatting` once the following issue is resolved:
+        // https://github.com/rust-lang/rust-clippy/issues/12497
+        // and leave `else if use_integer_compare` formatted as is.
+        #[allow(clippy::suspicious_else_formatting)]
+        let value = match name {
+            _ if simple.is_some() => {
+                let func = simple.expect("simple function");
+                self.cx.context.new_call(
+                    self.location,
+                    func,
+                    &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+                )
+            }
+            sym::fmaf16 => {
+                // TODO(antoyo): use the correct builtin for f16.
+                let func = self.cx.context.get_builtin_function("fmaf");
+                let args: Vec<_> = args
+                    .iter()
+                    .map(|arg| {
+                        self.cx.context.new_cast(self.location, arg.immediate(), self.cx.type_f32())
+                    })
+                    .collect();
+                let result = self.cx.context.new_call(self.location, func, &args);
+                self.cx.context.new_cast(self.location, result, self.cx.type_f16())
+            }
+            sym::is_val_statically_known => {
+                let a = args[0].immediate();
+                let builtin = self.context.get_builtin_function("__builtin_constant_p");
+                let res = self.context.new_call(None, builtin, &[a]);
+                self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
+            }
+            sym::catch_unwind => {
+                try_intrinsic(
+                    self,
+                    args[0].immediate(),
+                    args[1].immediate(),
+                    args[2].immediate(),
+                    llresult,
+                );
+                return Ok(());
+            }
+            sym::breakpoint => {
+                unimplemented!();
+            }
+            sym::va_copy => {
+                unimplemented!();
+            }
+            sym::va_arg => {
+                unimplemented!();
+            }
+
+            sym::volatile_load | sym::unaligned_volatile_load => {
+                let tp_ty = fn_args.type_at(0);
+                let ptr = args[0].immediate();
+                let layout = self.layout_of(tp_ty);
+                let load = if let PassMode::Cast { cast: ref ty, pad_i32: _ } = fn_abi.ret.mode {
+                    let gcc_ty = ty.gcc_type(self);
+                    self.volatile_load(gcc_ty, ptr)
+                } else {
+                    self.volatile_load(layout.gcc_type(self), ptr)
+                };
+                // TODO(antoyo): set alignment.
+                if let BackendRepr::Scalar(scalar) = layout.backend_repr {
+                    self.to_immediate_scalar(load, scalar)
+                } else {
+                    load
+                }
+            }
+            sym::volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::unaligned_volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.unaligned_volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::prefetch_read_data
+            | sym::prefetch_write_data
+            | sym::prefetch_read_instruction
+            | sym::prefetch_write_instruction => {
+                unimplemented!();
+            }
+            sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctpop
+            | sym::bswap
+            | sym::bitreverse
+            | sym::rotate_left
+            | sym::rotate_right
+            | sym::saturating_add
+            | sym::saturating_sub => {
+                let ty = arg_tys[0];
+                match int_type_width_signed(ty, self) {
+                    Some((width, signed)) => match name {
+                        sym::ctlz | sym::cttz => {
+                            let func = self.current_func.borrow().expect("func");
+                            let then_block = func.new_block("then");
+                            let else_block = func.new_block("else");
+                            let after_block = func.new_block("after");
+
+                            let arg = args[0].immediate();
+                            let result = func.new_local(None, self.u32_type, "zeros");
+                            let zero = self.cx.gcc_zero(arg.get_type());
+                            let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
+                            self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+                            let zero_result = self.cx.gcc_uint(self.u32_type, width);
+                            then_block.add_assignment(None, result, zero_result);
+                            then_block.end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place
+                            // count_leading_zeroes() does not expect, the current block
+                            // in the state needs to be updated.
+                            self.switch_to_block(else_block);
+
+                            let zeros = match name {
+                                sym::ctlz => self.count_leading_zeroes(width, arg),
+                                sym::cttz => self.count_trailing_zeroes(width, arg),
+                                _ => unreachable!(),
+                            };
+                            self.llbb().add_assignment(None, result, zeros);
+                            self.llbb().end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place rustc does not
+                            // expect, the current block in the state needs to be updated.
+                            self.switch_to_block(after_block);
+
+                            result.to_rvalue()
+                        }
+                        sym::ctlz_nonzero => self.count_leading_zeroes(width, args[0].immediate()),
+                        sym::cttz_nonzero => self.count_trailing_zeroes(width, args[0].immediate()),
+                        sym::ctpop => self.pop_count(args[0].immediate()),
+                        sym::bswap => {
+                            if width == 8 {
+                                args[0].immediate() // byte swapping a u8/i8 is just a no-op
+                            } else {
+                                self.gcc_bswap(args[0].immediate(), width)
+                            }
+                        }
+                        sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+                        sym::rotate_left | sym::rotate_right => {
+                            // TODO(antoyo): implement using algorithm from:
+                            // https://blog.regehr.org/archives/1063
+                            // for other platforms.
+                            let is_left = name == sym::rotate_left;
+                            let val = args[0].immediate();
+                            let raw_shift = args[1].immediate();
+                            if is_left {
+                                self.rotate_left(val, raw_shift, width)
+                            } else {
+                                self.rotate_right(val, raw_shift, width)
+                            }
+                        }
+                        sym::saturating_add => self.saturating_add(
+                            args[0].immediate(),
+                            args[1].immediate(),
+                            signed,
+                            width,
+                        ),
+                        sym::saturating_sub => self.saturating_sub(
+                            args[0].immediate(),
+                            args[1].immediate(),
+                            signed,
+                            width,
+                        ),
+                        _ => bug!(),
+                    },
+                    None => {
+                        tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                            span,
+                            name,
+                            ty,
+                        });
+                        return Ok(());
+                    }
+                }
+            }
+
+            sym::raw_eq => {
+                use rustc_abi::BackendRepr::*;
+                let tp_ty = fn_args.type_at(0);
+                let layout = self.layout_of(tp_ty).layout;
+                let _use_integer_compare = match layout.backend_repr() {
+                    Scalar(_) | ScalarPair(_, _) => true,
+                    SimdVector { .. } => false,
+                    Memory { .. } => {
+                        // For rusty ABIs, small aggregates are actually passed
+                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+                        // so we re-use that same threshold here.
+                        layout.size() <= self.data_layout().pointer_size * 2
+                    }
+                };
+
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                if layout.size().bytes() == 0 {
+                    self.const_bool(true)
+                }
+                /*else if use_integer_compare {
+                    let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+                    let ptr_ty = self.type_ptr_to(integer_ty);
+                    let a_ptr = self.bitcast(a, ptr_ty);
+                    let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+                    let b_ptr = self.bitcast(b, ptr_ty);
+                    let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
+                }*/
+                else {
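+                    // Fall back to a byte-wise comparison: call the `memcmp` builtin and
+                    // treat a zero return value as equality.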
+                    let void_ptr_type = self.context.new_type::<*const ()>();
+                    let a_ptr = self.bitcast(a, void_ptr_type);
+                    let b_ptr = self.bitcast(b, void_ptr_type);
+                    let n = self.context.new_cast(
+                        None,
+                        self.const_usize(layout.size().bytes()),
+                        self.sizet_type,
+                    );
+                    let builtin = self.context.get_builtin_function("memcmp");
+                    let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+                    self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+                }
+            }
+
+            sym::compare_bytes => {
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                let n = args[2].immediate();
+
+                let void_ptr_type = self.context.new_type::<*const ()>();
+                let a_ptr = self.bitcast(a, void_ptr_type);
+                let b_ptr = self.bitcast(b, void_ptr_type);
+
+                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                let builtin = self.context.get_builtin_function("memcmp");
+                let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+                self.sext(cmp, self.type_ix(32))
+            }
+
+            sym::black_box => {
+                args[0].val.store(self, result);
+
+                let block = self.llbb();
+                let extended_asm = block.add_extended_asm(None, "");
+                extended_asm.add_input_operand(None, "r", result.val.llval);
+                extended_asm.add_clobber("memory");
+                extended_asm.set_volatile_flag(true);
+
+                // We have copied the value to `result` already.
+                return Ok(());
+            }
+
+            sym::ptr_mask => {
+                let usize_type = self.context.new_type::<usize>();
+                let void_ptr_type = self.context.new_type::<*const ()>();
+
+                let ptr = args[0].immediate();
+                let mask = args[1].immediate();
+
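+                // Round-trip the pointer through an integer so the mask is applied to the
+                // address bits, then cast back to a pointer.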
+                let addr = self.bitcast(ptr, usize_type);
+                let masked = self.and(addr, mask);
+                self.bitcast(masked, void_ptr_type)
+            }
+
+            _ if name_str.starts_with("simd_") => {
+                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                    Ok(value) => value,
+                    Err(()) => return Ok(()),
+                }
+            }
+
+            // Fall back to default body
+            _ => return Err(Instance::new(instance.def_id(), instance.args)),
+        };
+
+        if !fn_abi.ret.is_ignore() {
+            if let PassMode::Cast { cast: ref ty, .. } = fn_abi.ret.mode {
+                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
+                let ptr = self.pointercast(result.val.llval, ptr_llty);
+                self.store(value, ptr, result.val.align);
+            } else {
+                OperandRef::from_immediate_or_packed_pair(self, value, result.layout)
+                    .val
+                    .store(self, result);
+            }
+        }
+        Ok(())
+    }
+
+    fn abort(&mut self) {
+        let func = self.context.get_builtin_function("abort");
+        let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+        self.call(self.type_void(), None, None, func, &[], None, None);
+    }
+
+    fn assume(&mut self, value: Self::Value) {
+        // TODO(antoyo): switch to assume when it exists.
+        // Or use something like this:
+        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+        self.expect(value, true);
+    }
+
+    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
+        // TODO(antoyo)
+        cond
+    }
+
+    fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value {
+        // Unsupported.
+        self.context.new_rvalue_from_int(self.int_type, 0)
+    }
+
+    fn type_checked_load(
+        &mut self,
+        _llvtable: Self::Value,
+        _vtable_byte_offset: u64,
+        _typeid: Self::Value,
+    ) -> Self::Value {
+        // Unsupported.
+        self.context.new_rvalue_from_int(self.int_type, 0)
+    }
+
+    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn store_fn_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, Self::Value>,
+    ) {
+        arg_abi.store_fn_arg(self, idx, dst)
+    }
+
+    fn store_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
+        arg_abi.store(self, val, dst)
+    }
+
+    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        arg_abi.memory_ty(self)
+    }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    );
+    fn store_fn_arg(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    );
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+    /// Gets the GCC type for a place of the original Rust type of
+    /// this argument/return, i.e., the result of `type_of::type_of`.
+    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        self.layout.gcc_type(cx)
+    }
+
+    /// Stores a direct/indirect value described by this ArgAbi into a
+    /// place for the original Rust type of this argument/return.
+    /// Can be used both for storing formal arguments into Rust variables
+    /// and for storing results of call/invoke instructions into their destinations.
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
+        if self.is_ignore() {
+            return;
+        }
+        if self.is_sized_indirect() {
+            OperandValue::Ref(PlaceValue::new_sized(val, self.layout.align.abi)).store(bx, dst)
+        } else if self.is_unsized_indirect() {
+            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+        } else if let PassMode::Cast { ref cast, .. } = self.mode {
+            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+            let can_store_through_cast_ptr = false;
+            if can_store_through_cast_ptr {
+                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
+                let cast_dst = bx.pointercast(dst.val.llval, cast_ptr_llty);
+                bx.store(val, cast_dst, self.layout.align.abi);
+            } else {
+                // The actual return type is a struct, but the ABI
+                // adaptation code has cast it into some scalar type.  The
+                // code that follows is the only reliable way I have
+                // found to do a transform like i64 -> {i32,i32}.
+                // Basically we dump the data onto the stack then memcpy it.
+                //
+                // Other approaches I tried:
+                // - Casting rust ret pointer to the foreign type and using Store
+                //   is (a) unsafe if size of foreign type > size of rust type and
+                //   (b) runs afoul of strict aliasing rules, yielding invalid
+                //   assembly under -O (specifically, the store gets removed).
+                // - Truncating foreign type to correct integral type and then
+                //   bitcasting to the struct type yields invalid cast errors.
+
+                // We instead thus allocate some scratch space...
+                let scratch_size = cast.size(bx);
+                let scratch_align = cast.align(bx);
+                let llscratch = bx.alloca(scratch_size, scratch_align);
+                bx.lifetime_start(llscratch, scratch_size);
+
+                // ... where we first store the value...
+                bx.store(val, llscratch, scratch_align);
+
+                // ... and then memcpy it to the intended destination.
+                bx.memcpy(
+                    dst.val.llval,
+                    self.layout.align.abi,
+                    llscratch,
+                    scratch_align,
+                    bx.const_usize(self.layout.size.bytes()),
+                    MemFlags::empty(),
+                );
+
+                bx.lifetime_end(llscratch, scratch_size);
+            }
+        } else {
+            OperandValue::Immediate(val).store(bx, dst);
+        }
+    }
+
+    fn store_fn_arg<'a>(
+        &self,
+        bx: &mut Builder<'a, 'gcc, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
+        let mut next = || {
+            let val = bx.current_func().get_param(*idx as i32);
+            *idx += 1;
+            val.to_rvalue()
+        };
+        match self.mode {
+            PassMode::Ignore => {}
+            PassMode::Pair(..) => {
+                OperandValue::Pair(next(), next()).store(bx, dst);
+            }
+            PassMode::Indirect { meta_attrs: Some(_), .. } => {
+                let place_val = PlaceValue {
+                    llval: next(),
+                    llextra: Some(next()),
+                    align: self.layout.align.abi,
+                };
+                OperandValue::Ref(place_val).store(bx, dst);
+            }
+            PassMode::Direct(_)
+            | PassMode::Indirect { meta_attrs: None, .. }
+            | PassMode::Cast { .. } => {
+                let next_arg = next();
+                self.store(bx, next_arg, dst);
+            }
+        }
+    }
+}
+
+fn int_type_width_signed<'gcc, 'tcx>(
+    ty: Ty<'tcx>,
+    cx: &CodegenCx<'gcc, 'tcx>,
+) -> Option<(u64, bool)> {
+    match *ty.kind() {
+        ty::Int(t) => Some((
+            match t {
+                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
+                rustc_middle::ty::IntTy::I8 => 8,
+                rustc_middle::ty::IntTy::I16 => 16,
+                rustc_middle::ty::IntTy::I32 => 32,
+                rustc_middle::ty::IntTy::I64 => 64,
+                rustc_middle::ty::IntTy::I128 => 128,
+            },
+            true,
+        )),
+        ty::Uint(t) => Some((
+            match t {
+                rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
+                rustc_middle::ty::UintTy::U8 => 8,
+                rustc_middle::ty::UintTy::U16 => 16,
+                rustc_middle::ty::UintTy::U32 => 32,
+                rustc_middle::ty::UintTy::U64 => 64,
+                rustc_middle::ty::UintTy::U128 => 128,
+            },
+            false,
+        )),
+        _ => None,
+    }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
+        let result_type = value.get_type();
+        let typ = result_type.to_unsigned(self.cx);
+
+        let value =
+            if result_type.is_signed(self.cx) { self.gcc_int_cast(value, typ) } else { value };
+
+        let context = &self.cx.context;
+        let result = match width {
+            8 | 16 | 32 | 64 => {
+                let mask = ((1u128 << width) - 1) as u64;
+                let (m0, m1, m2) = if width > 16 {
+                    (
+                        context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
+                    )
+                } else {
+                    (
+                        context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
+                    )
+                };
+                let one = context.new_rvalue_from_int(typ, 1);
+                let two = context.new_rvalue_from_int(typ, 2);
+                let four = context.new_rvalue_from_int(typ, 4);
+
+                // First step: swap adjacent bits.
+                let left = self.lshr(value, one);
+                let left = self.and(left, m0);
+                let right = self.and(value, m0);
+                let right = self.shl(right, one);
+                let step1 = self.or(left, right);
+
+                // Second step: swap adjacent 2-bit groups.
+                let left = self.lshr(step1, two);
+                let left = self.and(left, m1);
+                let right = self.and(step1, m1);
+                let right = self.shl(right, two);
+                let step2 = self.or(left, right);
+
+                // Third step: swap adjacent nibbles.
+                let left = self.lshr(step2, four);
+                let left = self.and(left, m2);
+                let right = self.and(step2, m2);
+                let right = self.shl(right, four);
+                let step3 = self.or(left, right);
+
+                // Fourth step: reverse the byte order (skipped for width 8, which is a single byte).
+                if width == 8 { step3 } else { self.gcc_bswap(step3, width) }
+            }
+            128 => {
+                // TODO(antoyo): find a more efficient implementation?
+                let sixty_four = self.gcc_int(typ, 64);
+                let right_shift = self.gcc_lshr(value, sixty_four);
+                let high = self.gcc_int_cast(right_shift, self.u64_type);
+                let low = self.gcc_int_cast(value, self.u64_type);
+
+                let reversed_high = self.bit_reverse(64, high);
+                let reversed_low = self.bit_reverse(64, low);
+
+                let new_low = self.gcc_int_cast(reversed_high, typ);
+                let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
+
+                self.gcc_or(new_low, new_high, self.location)
+            }
+            _ => {
+                panic!("cannot bit reverse with width = {}", width);
+            }
+        };
+
+        self.gcc_int_cast(result, result_type)
+    }
+
+    fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use width?
+        let arg_type = arg.get_type();
+        let result_type = self.u32_type;
+        let count_leading_zeroes =
+            // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+            // instead of using is_uint().
+            if arg_type.is_uint(self.cx) {
+                "__builtin_clz"
+            }
+            else if arg_type.is_ulong(self.cx) {
+                "__builtin_clzl"
+            }
+            else if arg_type.is_ulonglong(self.cx) {
+                "__builtin_clzll"
+            }
+            else if width == 128 {
+                // Algorithm from: https://stackoverflow.com/a/28433850/389119
+                let array_type = self.context.new_array_type(None, arg_type, 3);
+                let result = self.current_func()
+                    .new_local(None, array_type, "count_leading_zeroes_results");
+
+                let sixty_four = self.const_uint(arg_type, 64);
+                let shift = self.lshr(arg, sixty_four);
+                let high = self.gcc_int_cast(shift, self.u64_type);
+                let low = self.gcc_int_cast(arg, self.u64_type);
+
+                let zero = self.context.new_rvalue_zero(self.usize_type);
+                let one = self.context.new_rvalue_one(self.usize_type);
+                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+                let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+                let first_elem = self.context.new_array_access(None, result, zero);
+                let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
+                self.llbb()
+                    .add_assignment(self.location, first_elem, first_value);
+
+                let second_elem = self.context.new_array_access(self.location, result, one);
+                let cast = self.gcc_int_cast(self.context.new_call(self.location, clzll, &[low]), arg_type);
+                let second_value = self.add(cast, sixty_four);
+                self.llbb()
+                    .add_assignment(self.location, second_elem, second_value);
+
+                let third_elem = self.context.new_array_access(self.location, result, two);
+                let third_value = self.const_uint(arg_type, 128);
+                self.llbb()
+                    .add_assignment(self.location, third_elem, third_value);
+
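+                // LogicalNegate yields 1 for a zero half and 0 otherwise, so the index
+                // selects clzll(high), 64 + clzll(low), or the constant 128 in that order.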
+                let not_high = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_low_and_not_high = not_low & not_high;
+                let index = not_high + not_low_and_not_high;
+                // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+                // gcc.
+                // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+                // compilation stage.
+                let index = self.context.new_cast(self.location, index, self.i32_type);
+
+                let res = self.context.new_array_access(self.location, result, index);
+
+                return self.gcc_int_cast(res.to_rvalue(), result_type);
+            }
+            else {
+                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
+                let arg = self.context.new_cast(self.location, arg, self.ulonglong_type);
+                let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
+                let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
+                let res = self.context.new_call(self.location, count_leading_zeroes, &[arg]) - diff;
+                return self.context.new_cast(self.location, res, result_type);
+            };
+        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+        let res = self.context.new_call(self.location, count_leading_zeroes, &[arg]);
+        self.context.new_cast(self.location, res, result_type)
+    }
+
+    fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+        let arg_type = arg.get_type();
+        let result_type = self.u32_type;
+        let arg = if arg_type.is_signed(self.cx) {
+            let new_type = arg_type.to_unsigned(self.cx);
+            self.gcc_int_cast(arg, new_type)
+        } else {
+            arg
+        };
+        let arg_type = arg.get_type();
+        let (count_trailing_zeroes, expected_type) =
+            // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+            // instead of using is_uint().
+            if arg_type.is_uchar(self.cx) || arg_type.is_ushort(self.cx) || arg_type.is_uint(self.cx) {
+                // NOTE: we don't need to mask with 0xFF for uchar because the result is undefined on zero.
+                ("__builtin_ctz", self.cx.uint_type)
+            }
+            else if arg_type.is_ulong(self.cx) {
+                ("__builtin_ctzl", self.cx.ulong_type)
+            }
+            else if arg_type.is_ulonglong(self.cx) {
+                ("__builtin_ctzll", self.cx.ulonglong_type)
+            }
+            else if arg_type.is_u128(self.cx) {
+                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+                let array_type = self.context.new_array_type(None, arg_type, 3);
+                let result = self.current_func()
+                    .new_local(None, array_type, "count_trailing_zeroes_results");
+
+                let sixty_four = self.gcc_int(arg_type, 64);
+                let shift = self.gcc_lshr(arg, sixty_four);
+                let high = self.gcc_int_cast(shift, self.u64_type);
+                let low = self.gcc_int_cast(arg, self.u64_type);
+
+                let zero = self.context.new_rvalue_zero(self.usize_type);
+                let one = self.context.new_rvalue_one(self.usize_type);
+                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+                let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+                let first_elem = self.context.new_array_access(self.location, result, zero);
+                let first_value = self.gcc_int_cast(self.context.new_call(self.location, ctzll, &[low]), arg_type);
+                self.llbb()
+                    .add_assignment(self.location, first_elem, first_value);
+
+                let second_elem = self.context.new_array_access(self.location, result, one);
+                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(self.location, ctzll, &[high]), arg_type), sixty_four);
+                self.llbb()
+                    .add_assignment(self.location, second_elem, second_value);
+
+                let third_elem = self.context.new_array_access(self.location, result, two);
+                let third_value = self.gcc_int(arg_type, 128);
+                self.llbb()
+                    .add_assignment(self.location, third_elem, third_value);
+
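+                // As above: the index selects ctzll(low), 64 + ctzll(high), or the
+                // constant 128, depending on which halves are zero.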
+                let not_low = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low_and_not_high = not_low & not_high;
+                let index = not_low + not_low_and_not_high;
+                // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+                // gcc.
+                // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+                // compilation stage.
+                let index = self.context.new_cast(self.location, index, self.i32_type);
+
+                let res = self.context.new_array_access(self.location, result, index);
+
+                return self.gcc_int_cast(res.to_rvalue(), result_type);
+            }
+            else {
+                let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
+                let arg_size = arg_type.get_size();
+                let casted_arg = self.context.new_cast(self.location, arg, self.ulonglong_type);
+                let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
+                let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
+                let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
+                let masked = mask & self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, arg_type, arg);
+                let cond = self.context.new_comparison(self.location, ComparisonOp::Equals, masked, mask);
+                let diff = diff * self.context.new_cast(self.location, cond, self.int_type);
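+                // NOTE: `cond` is 1 only when `arg` is zero, in which case `diff` rebases
+                // the 64-bit count, presumably so that a zero input yields the full width.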
+                let res = self.context.new_call(self.location, count_trailing_zeroes, &[casted_arg]) - diff;
+                return self.context.new_cast(self.location, res, result_type);
+            };
+        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
+        let arg = if arg_type != expected_type {
+            self.context.new_cast(self.location, arg, expected_type)
+        } else {
+            arg
+        };
+        let res = self.context.new_call(self.location, count_trailing_zeroes, &[arg]);
+        self.context.new_cast(self.location, res, result_type)
+    }
+
+    fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use the optimized version with fewer operations.
+        let result_type = self.u32_type;
+        let arg_type = value.get_type();
+        let value_type = arg_type.to_unsigned(self.cx);
+
+        let value =
+            if arg_type.is_signed(self.cx) { self.gcc_int_cast(value, value_type) } else { value };
+
+        // only break apart 128-bit ints if they're not natively supported
+        // TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
+        if value_type.is_u128(self.cx) && !self.cx.supports_128bit_integers {
+            let sixty_four = self.gcc_int(value_type, 64);
+            let right_shift = self.gcc_lshr(value, sixty_four);
+            let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
+            let high = self.pop_count(high);
+            let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
+            let low = self.pop_count(low);
+            let res = high + low;
+            return self.gcc_int_cast(res, result_type);
+        }
+
+        // Use Wegner's algorithm for population count; gcc seems to play better with it:
+        // for (int counter = 0; value != 0; counter++) {
+        //     value &= value - 1;
+        // }
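+        // Each iteration clears the lowest set bit, e.g. 0b1101 -> 0b1100 -> 0b1000 -> 0,
+        // so the loop body runs once per set bit.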
+        let func = self.current_func.borrow().expect("func");
+        let loop_head = func.new_block("head");
+        let loop_body = func.new_block("body");
+        let loop_tail = func.new_block("tail");
+
+        let counter_type = self.int_type;
+        let counter = self.current_func().new_local(None, counter_type, "popcount_counter");
+        let val = self.current_func().new_local(None, value_type, "popcount_value");
+        let zero = self.gcc_zero(counter_type);
+        self.llbb().add_assignment(self.location, counter, zero);
+        self.llbb().add_assignment(self.location, val, value);
+        self.br(loop_head);
+
+        // check if value isn't zero
+        self.switch_to_block(loop_head);
+        let zero = self.gcc_zero(value_type);
+        let cond = self.gcc_icmp(IntPredicate::IntNE, val.to_rvalue(), zero);
+        self.cond_br(cond, loop_body, loop_tail);
+
+        // val &= val - 1;
+        self.switch_to_block(loop_body);
+        let one = self.gcc_int(value_type, 1);
+        let sub = self.gcc_sub(val.to_rvalue(), one);
+        let op = self.gcc_and(val.to_rvalue(), sub);
+        loop_body.add_assignment(self.location, val, op);
+
+        // counter += 1
+        let one = self.gcc_int(counter_type, 1);
+        let op = self.gcc_add(counter.to_rvalue(), one);
+        loop_body.add_assignment(self.location, counter, op);
+        self.br(loop_head);
+
+        // end of loop
+        self.switch_to_block(loop_tail);
+        self.gcc_int_cast(counter.to_rvalue(), result_type)
+    }
+
+    // Algorithm from: https://blog.regehr.org/archives/1063
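+    // Reduce the shift modulo the width, then combine `value << shift` with
+    // `value >> (-shift & (width - 1))`; the mask keeps both shift amounts below
+    // `width` (a power of two here), so no shift is ever by the full bit width.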
+    fn rotate_left(
+        &mut self,
+        value: RValue<'gcc>,
+        shift: RValue<'gcc>,
+        width: u64,
+    ) -> RValue<'gcc> {
+        let max = self.const_uint(shift.get_type(), width);
+        let shift = self.urem(shift, max);
+        let lhs = self.shl(value, shift);
+        let result_neg = self.neg(shift);
+        let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1));
+        let rhs = self.lshr(value, result_and);
+        self.or(lhs, rhs)
+    }
+
+    // Algorithm from: https://blog.regehr.org/archives/1063
+    fn rotate_right(
+        &mut self,
+        value: RValue<'gcc>,
+        shift: RValue<'gcc>,
+        width: u64,
+    ) -> RValue<'gcc> {
+        let max = self.const_uint(shift.get_type(), width);
+        let shift = self.urem(shift, max);
+        let lhs = self.lshr(value, shift);
+        let result_neg = self.neg(shift);
+        let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1));
+        let rhs = self.shl(value, result_and);
+        self.or(lhs, rhs)
+    }
+
+    fn saturating_add(
+        &mut self,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        signed: bool,
+        width: u64,
+    ) -> RValue<'gcc> {
+        let result_type = lhs.get_type();
+        if signed {
+            // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+            let func = self.current_func.borrow().expect("func");
+            let res = func.new_local(self.location, result_type, "saturating_sum");
+            let supports_native_type = self.is_native_int_type(result_type);
+            let overflow = if supports_native_type {
+                let func_name = match width {
+                    8 => "__builtin_add_overflow",
+                    16 => "__builtin_add_overflow",
+                    32 => "__builtin_sadd_overflow",
+                    64 => "__builtin_saddll_overflow",
+                    128 => "__builtin_add_overflow",
+                    _ => unreachable!(),
+                };
+                let overflow_func = self.context.get_builtin_function(func_name);
+                self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.location)], None)
+            } else {
+                let func_name = match width {
+                    128 => "__rust_i128_addo",
+                    _ => unreachable!(),
+                };
+                let (int_result, overflow) =
+                    self.operation_with_overflow(func_name, lhs, rhs, width);
+                self.llbb().add_assignment(self.location, res, int_result);
+                overflow
+            };
+
+            let then_block = func.new_block("then");
+            let after_block = func.new_block("after");
+
+            // Return `result_type`'s maximum or minimum value on overflow
+            // NOTE: convert the type to unsigned to have an unsigned shift.
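+            // `shifted` is the sign bit of `lhs` (0 or 1): `int_max + 0` stays INT_MAX,
+            // while `int_max + 1` is the INT_MIN bit pattern, selecting the saturation
+            // bound matching the overflow direction.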
+            let unsigned_type = result_type.to_unsigned(self.cx);
+            let shifted = self.gcc_lshr(
+                self.gcc_int_cast(lhs, unsigned_type),
+                self.gcc_int(unsigned_type, width as i64 - 1),
+            );
+            let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+            let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+            then_block.add_assignment(
+                self.location,
+                res,
+                self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type),
+            );
+            then_block.end_with_jump(self.location, after_block);
+
+            self.llbb().end_with_conditional(self.location, overflow, then_block, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not
+            // expect, the current block in the state needs to be updated.
+            self.switch_to_block(after_block);
+
+            res.to_rvalue()
+        } else {
+            // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
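+            // `res < lhs` detects wraparound; negating the boolean produces an all-ones
+            // mask, so the OR saturates the result to the type's maximum on overflow.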
+            let res = self.gcc_add(lhs, rhs);
+            let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
+            let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
+            self.gcc_or(res, value, self.location)
+        }
+    }
+
+    // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
+    fn saturating_sub(
+        &mut self,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        signed: bool,
+        width: u64,
+    ) -> RValue<'gcc> {
+        let result_type = lhs.get_type();
+        if signed {
+            // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+            let func = self.current_func.borrow().expect("func");
+            let res = func.new_local(self.location, result_type, "saturating_diff");
+            let supports_native_type = self.is_native_int_type(result_type);
+            let overflow = if supports_native_type {
+                let func_name = match width {
+                    8 => "__builtin_sub_overflow",
+                    16 => "__builtin_sub_overflow",
+                    32 => "__builtin_ssub_overflow",
+                    64 => "__builtin_ssubll_overflow",
+                    128 => "__builtin_sub_overflow",
+                    _ => unreachable!(),
+                };
+                let overflow_func = self.context.get_builtin_function(func_name);
+                self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.location)], None)
+            } else {
+                let func_name = match width {
+                    128 => "__rust_i128_subo",
+                    _ => unreachable!(),
+                };
+                let (int_result, overflow) =
+                    self.operation_with_overflow(func_name, lhs, rhs, width);
+                self.llbb().add_assignment(self.location, res, int_result);
+                overflow
+            };
+
+            let then_block = func.new_block("then");
+            let after_block = func.new_block("after");
+
+            // Return `result_type`'s maximum or minimum value on overflow
+            // NOTE: convert the type to unsigned to have an unsigned shift.
+            let unsigned_type = result_type.to_unsigned(self.cx);
+            let shifted = self.gcc_lshr(
+                self.gcc_int_cast(lhs, unsigned_type),
+                self.gcc_int(unsigned_type, width as i64 - 1),
+            );
+            let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+            let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+            then_block.add_assignment(
+                self.location,
+                res,
+                self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type),
+            );
+            then_block.end_with_jump(self.location, after_block);
+
+            self.llbb().end_with_conditional(self.location, overflow, then_block, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not
+            // expect, the current block in the state needs to be updated.
+            self.switch_to_block(after_block);
+
+            res.to_rvalue()
+        } else {
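+            // `res <= lhs` means no borrow occurred; the negated boolean is an all-ones
+            // keep-mask, so the AND clamps the result to zero on underflow.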
+            let res = self.gcc_sub(lhs, rhs);
+            let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs);
+            let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type));
+            self.gcc_and(res, value)
+        }
+    }
+}
+
+fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(
+    bx: &'b mut Builder<'a, 'gcc, 'tcx>,
+    try_func: RValue<'gcc>,
+    data: RValue<'gcc>,
+    _catch_func: RValue<'gcc>,
+    dest: RValue<'gcc>,
+) {
+    if bx.sess().panic_strategy() == PanicStrategy::Abort {
+        bx.call(bx.type_void(), None, None, try_func, &[data], None, None);
+        // Return 0 unconditionally from the intrinsic call;
+        // we can never unwind.
+        let ret_align = bx.tcx.data_layout.i32_align.abi;
+        bx.store(bx.const_i32(0), dest, ret_align);
+    } else {
+        if wants_msvc_seh(bx.sess()) {
+            unimplemented!();
+        }
+        #[cfg(feature = "master")]
+        codegen_gnu_try(bx, try_func, data, _catch_func, dest);
+        #[cfg(not(feature = "master"))]
+        unimplemented!();
+    }
+}
+
+// Definition of the standard `try` function for Rust using the GNU-like model
+// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
+// instructions).
+//
+// This codegen is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+#[cfg(feature = "master")]
+fn codegen_gnu_try<'gcc>(
+    bx: &mut Builder<'_, 'gcc, '_>,
+    try_func: RValue<'gcc>,
+    data: RValue<'gcc>,
+    catch_func: RValue<'gcc>,
+    dest: RValue<'gcc>,
+) {
+    let cx: &CodegenCx<'gcc, '_> = bx.cx;
+    let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| {
+        // Codegens the shims described above:
+        //
+        //   bx:
+        //      invoke %try_func(%data) normal %normal unwind %catch
+        //
+        //   normal:
+        //      ret 0
+        //
+        //   catch:
+        //      (%ptr, _) = landingpad
+        //      call %catch_func(%data, %ptr)
+        //      ret 1
+        let then = bx.append_sibling_block("then");
+        let catch = bx.append_sibling_block("catch");
+
+        let func = bx.current_func();
+        let try_func = func.get_param(0).to_rvalue();
+        let data = func.get_param(1).to_rvalue();
+        let catch_func = func.get_param(2).to_rvalue();
+        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+
+        let current_block = bx.block;
+
+        bx.switch_to_block(then);
+        bx.ret(bx.const_i32(0));
+
+        // The value returned by `__builtin_eh_pointer` below is a pointer
+        // to the exception object being thrown.
+        bx.switch_to_block(catch);
+        bx.set_personality_fn(bx.eh_personality());
+
+        let eh_pointer_builtin = bx.cx.context.get_target_builtin_function("__builtin_eh_pointer");
+        let zero = bx.cx.context.new_rvalue_zero(bx.int_type);
+        let ptr = bx.cx.context.new_call(None, eh_pointer_builtin, &[zero]);
+        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
+        bx.ret(bx.const_i32(1));
+
+        // NOTE: the blocks must be filled before adding the try/catch, otherwise gcc will not
+        // generate a try/catch.
+        // FIXME(antoyo): add a check in the libgccjit API to prevent this.
+        bx.switch_to_block(current_block);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
+    });
+
+    let func = unsafe { std::mem::transmute::<Function<'gcc>, RValue<'gcc>>(func) };
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, func, &[try_func, data, catch_func], None, None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Helper function used to get a handle to the `__rust_try` function used to
+// catch exceptions.
+//
+// This function is only generated once and is then cached.
+#[cfg(feature = "master")]
+fn get_rust_try_fn<'a, 'gcc, 'tcx>(
+    cx: &'a CodegenCx<'gcc, 'tcx>,
+    codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>),
+) -> (Type<'gcc>, Function<'gcc>) {
+    if let Some(llfn) = cx.rust_try_fn.get() {
+        return llfn;
+    }
+
+    // Define the type up front for the signature of the rust_try function.
+    let tcx = cx.tcx;
+    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
+    // `unsafe fn(*mut i8) -> ()`
+    let try_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            iter::once(i8p),
+            tcx.types.unit,
+            false,
+            rustc_hir::Safety::Unsafe,
+            ExternAbi::Rust,
+        )),
+    );
+    // `unsafe fn(*mut i8, *mut i8) -> ()`
+    let catch_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p, i8p].iter().cloned(),
+            tcx.types.unit,
+            false,
+            rustc_hir::Safety::Unsafe,
+            ExternAbi::Rust,
+        )),
+    );
+    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
+    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
+        [try_fn_ty, i8p, catch_fn_ty],
+        tcx.types.i32,
+        false,
+        rustc_hir::Safety::Unsafe,
+        ExternAbi::Rust,
+    ));
+    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
+    cx.rust_try_fn.set(Some(rust_try));
+    rust_try
+}
+
+// Helper function to give a Block to a closure to codegen a shim function.
+// This is currently primarily used for the `try` intrinsic functions above.
+#[cfg(feature = "master")]
+fn gen_fn<'a, 'gcc, 'tcx>(
+    cx: &'a CodegenCx<'gcc, 'tcx>,
+    name: &str,
+    rust_fn_sig: ty::PolyFnSig<'tcx>,
+    codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>),
+) -> (Type<'gcc>, Function<'gcc>) {
+    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
+    let return_type = fn_abi.gcc_type(cx).return_type;
+    // FIXME(eddyb) find a nicer way to do this.
+    cx.linkage.set(FunctionType::Internal);
+    let func = cx.declare_fn(name, fn_abi);
+    let func_val = unsafe { std::mem::transmute::<Function<'gcc>, RValue<'gcc>>(func) };
+    cx.set_frame_pointer_type(func_val);
+    cx.apply_target_cpu_attr(func_val);
+    let block = Builder::append_block(cx, func_val, "entry-block");
+    let bx = Builder::build(cx, block);
+    codegen(bx);
+    (return_type, func)
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
new file mode 100644
index 00000000000..6d40d5297f1
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -0,0 +1,1439 @@
+use std::iter::FromIterator;
+
+use gccjit::{BinaryOp, RValue, ToRValue, Type};
+#[cfg(feature = "master")]
+use gccjit::{ComparisonOp, UnaryOp};
+use rustc_abi::{Align, Size};
+use rustc_codegen_ssa::base::compare_simd_types;
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+#[cfg(feature = "master")]
+use rustc_codegen_ssa::errors::ExpectedPointerMutability;
+use rustc_codegen_ssa::errors::InvalidMonomorphization;
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
+#[cfg(feature = "master")]
+use rustc_hir as hir;
+use rustc_middle::mir::BinOp;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, Symbol, sym};
+
+use crate::builder::Builder;
+#[cfg(not(feature = "master"))]
+use crate::common::SignType;
+#[cfg(feature = "master")]
+use crate::context::CodegenCx;
+
+pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
+    bx: &mut Builder<'a, 'gcc, 'tcx>,
+    name: Symbol,
+    callee_ty: Ty<'tcx>,
+    args: &[OperandRef<'tcx, RValue<'gcc>>],
+    ret_ty: Ty<'tcx>,
+    llret_ty: Type<'gcc>,
+    span: Span,
+) -> Result<RValue<'gcc>, ()> {
+    // macros for error handling:
+    macro_rules! return_error {
+        ($err:expr) => {{
+            bx.tcx.dcx().emit_err($err);
+            return Err(());
+        }};
+    }
+    macro_rules! require {
+        ($cond:expr, $err:expr) => {
+            if !$cond {
+                return_error!($err);
+            }
+        };
+    }
+    macro_rules! require_simd {
+        ($ty: expr, $diag: expr) => {
+            require!($ty.is_simd(), $diag)
+        };
+    }
+
+    let tcx = bx.tcx();
+    let sig = tcx.normalize_erasing_late_bound_regions(
+        ty::TypingEnv::fully_monomorphized(),
+        callee_ty.fn_sig(tcx),
+    );
+    let arg_tys = sig.inputs();
+
+    if name == sym::simd_select_bitmask {
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+        );
+        let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+
+        let expected_int_bits = (len.max(8) - 1).next_power_of_two();
+        let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
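+        // Worked example (illustrative): for len = 4, expected_int_bits = 8 and
+        // expected_bytes = 1; for len = 16, expected_int_bits = 16 and expected_bytes = 2.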
+
+        let mask_ty = arg_tys[0];
+        let mut mask = match *mask_ty.kind() {
+            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+            ty::Array(elem, len)
+                if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
+                    && len
+                        .try_to_target_usize(bx.tcx)
+                        .expect("expected monomorphic const in codegen")
+                        == expected_bytes =>
+            {
+                let place = PlaceRef::alloca(bx, args[0].layout);
+                args[0].val.store(bx, place);
+                let int_ty = bx.type_ix(expected_bytes * 8);
+                let ptr = bx.pointercast(place.val.llval, bx.cx.type_ptr_to(int_ty));
+                bx.load(int_ty, ptr, Align::ONE)
+            }
+            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
+                span,
+                name,
+                mask_ty,
+                expected_int_bits,
+                expected_bytes
+            }),
+        };
+
+        let arg1 = args[1].immediate();
+        let arg1_type = arg1.get_type();
+        let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type");
+        let arg1_element_type = arg1_vector_type.get_element_type();
+
+        // NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of
+        // integers.
+        let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8);
+        let vector_mask_type =
+            bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64);
+
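+        // Expand the integer bitmask into one mask lane per bit, LSB first. As a rough
+        // illustration, a mask of 0b0101 over 4 lanes becomes the vector <1, 0, 1, 0>,
+        // which selects lanes 0 and 2 from the first vector argument.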
+        let mut elements = vec![];
+        let one = bx.context.new_rvalue_one(mask.get_type());
+        for _ in 0..len {
+            let element = bx.context.new_cast(None, mask & one, mask_element_type);
+            elements.push(element);
+            mask = mask >> one;
+        }
+        let vector_mask = bx.context.new_rvalue_from_vector(None, vector_mask_type, &elements);
+
+        return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate()));
+    }
+
+    // every intrinsic below takes a SIMD vector as its first argument
+    require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
+    let in_ty = arg_tys[0];
+
+    let comparison = match name {
+        sym::simd_eq => Some(BinOp::Eq),
+        sym::simd_ne => Some(BinOp::Ne),
+        sym::simd_lt => Some(BinOp::Lt),
+        sym::simd_le => Some(BinOp::Le),
+        sym::simd_gt => Some(BinOp::Gt),
+        sym::simd_ge => Some(BinOp::Ge),
+        _ => None,
+    };
+
+    let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+    if let Some(cmp_op) = comparison {
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+        require!(
+            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
+        );
+
+        let arg1 = args[0].immediate();
+        // NOTE: we can get different GCC type objects for the same vector type, and libgccjit
+        // doesn't compare them as equal, so bitcast.
+        // FIXME(antoyo): allow comparing vector types as equal in libgccjit.
+        let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type());
+        return Ok(compare_simd_types(bx, arg1, arg2, in_elem, llret_ty, cmp_op));
+    }
+
+    let simd_bswap = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> {
+        let v_type = vector.get_type();
+        let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
+        let elem_type = vector_type.get_element_type();
+        let elem_size_bytes = elem_type.get_size();
+        if elem_size_bytes == 1 {
+            return vector;
+        }
+
+        let type_size_bytes = elem_size_bytes as u64 * in_len;
+        let shuffle_indices = Vec::from_iter(0..type_size_bytes);
+        let byte_vector_type = bx.context.new_vector_type(bx.type_u8(), type_size_bytes);
+        let byte_vector = bx.context.new_bitcast(None, vector, byte_vector_type);
+
+        #[cfg(not(feature = "master"))]
+        let shuffled = {
+            let new_elements: Vec<_> = shuffle_indices
+                .chunks_exact(elem_size_bytes as _)
+                .flat_map(|x| x.iter().rev())
+                .map(|&i| {
+                    let index = bx.context.new_rvalue_from_long(bx.u64_type, i as _);
+                    bx.extract_element(byte_vector, index)
+                })
+                .collect();
+
+            bx.context.new_rvalue_from_vector(None, byte_vector_type, &new_elements)
+        };
+        #[cfg(feature = "master")]
+        let shuffled = {
+            let indices: Vec<_> = shuffle_indices
+                .chunks_exact(elem_size_bytes as _)
+                .flat_map(|x| x.iter().rev())
+                .map(|&i| bx.context.new_rvalue_from_int(bx.u8_type, i as _))
+                .collect();
+
+            let mask = bx.context.new_rvalue_from_vector(None, byte_vector_type, &indices);
+            bx.context.new_rvalue_vector_perm(None, byte_vector, byte_vector, mask)
+        };
+        bx.context.new_bitcast(None, shuffled, v_type)
+    };
+
+    if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop) {
+        require!(
+            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+            InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+        );
+    }
+
+    if name == sym::simd_bswap {
+        return Ok(simd_bswap(bx, args[0].immediate()));
+    }
+
+    let simd_ctpop = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> {
+        let mut vector_elements = vec![];
+        let elem_ty = bx.element_type(llret_ty);
+        for i in 0..in_len {
+            let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64);
+            let element = bx.extract_element(vector, index).to_rvalue();
+            let result = bx.context.new_cast(None, bx.pop_count(element), elem_ty);
+            vector_elements.push(result);
+        }
+        bx.context.new_rvalue_from_vector(None, llret_ty, &vector_elements)
+    };
+
+    if name == sym::simd_ctpop {
+        return Ok(simd_ctpop(bx, args[0].immediate()));
+    }
+
+    // We use a different algorithm from non-vector bitreverse to take advantage of most
+    // processors' vector shuffle units.  It works like this:
+    // 1. Generate pre-reversed low and high nibbles as a vector.
+    // 2. Byte-swap the input.
+    // 3. Mask off the low and high nibbles of each byte in the byte-swapped input.
+    // 4. Shuffle the pre-reversed low and high-nibbles using the masked nibbles as a shuffle mask.
+    // 5. Combine the results of the shuffle back together and cast back to the original type.
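+    // Illustrative single-byte example (not part of the original comment): for the input
+    // byte 0b1101_0010, the byte swap is a no-op; the low nibble 0b0010, looked up in the
+    // pre-reversed table for the high output nibble, gives 0b0100_0000, and the high nibble
+    // 0b1101 gives 0b0000_1011 from the low-nibble table; OR-ing them yields 0b0100_1011,
+    // the bit-reversed byte.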
+    #[cfg(feature = "master")]
+    if name == sym::simd_bitreverse {
+        let vector = args[0].immediate();
+        let v_type = vector.get_type();
+        let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
+        let elem_type = vector_type.get_element_type();
+        let elem_size_bytes = elem_type.get_size();
+
+        let type_size_bytes = elem_size_bytes as u64 * in_len;
+        // We need to ensure at least 16 entries in our vector type, since the pre-reversed vectors
+        // we generate below have 16 entries in them.  `new_rvalue_vector_perm` requires the mask
+        // vector to be of the same length as the source vectors.
+        let byte_vector_type_size = type_size_bytes.max(16);
+
+        let byte_vector_type = bx.context.new_vector_type(bx.u8_type, type_size_bytes);
+        let long_byte_vector_type = bx.context.new_vector_type(bx.u8_type, byte_vector_type_size);
+
+        // Step 1: Generate pre-reversed low and high nibbles as a vector.
+        let zero_byte = bx.context.new_rvalue_zero(bx.u8_type);
+        let hi_nibble_elements: Vec<_> = (0u8..16)
+            .map(|x| bx.context.new_rvalue_from_int(bx.u8_type, x.reverse_bits() as _))
+            .chain((16..byte_vector_type_size).map(|_| zero_byte))
+            .collect();
+        let hi_nibble =
+            bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &hi_nibble_elements);
+
+        let lo_nibble_elements: Vec<_> = (0u8..16)
+            .map(|x| bx.context.new_rvalue_from_int(bx.u8_type, (x.reverse_bits() >> 4) as _))
+            .chain((16..byte_vector_type_size).map(|_| zero_byte))
+            .collect();
+        let lo_nibble =
+            bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &lo_nibble_elements);
+
+        let mask = bx.context.new_rvalue_from_vector(
+            None,
+            long_byte_vector_type,
+            &vec![bx.context.new_rvalue_from_int(bx.u8_type, 0x0f); byte_vector_type_size as _],
+        );
+
+        let four_vec = bx.context.new_rvalue_from_vector(
+            None,
+            long_byte_vector_type,
+            &vec![bx.context.new_rvalue_from_int(bx.u8_type, 4); byte_vector_type_size as _],
+        );
+
+        // Step 2: Byte-swap the input.
+        let swapped = simd_bswap(bx, args[0].immediate());
+        let byte_vector = bx.context.new_bitcast(None, swapped, byte_vector_type);
+
+        // We're going to need to extend the vector with zeros to make sure that the types are the
+        // same, since that's what new_rvalue_vector_perm expects.
+        let byte_vector = if byte_vector_type_size > type_size_bytes {
+            let mut byte_vector_elements = Vec::with_capacity(byte_vector_type_size as _);
+            for i in 0..type_size_bytes {
+                let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
+                let val = bx.extract_element(byte_vector, idx);
+                byte_vector_elements.push(val);
+            }
+            for _ in type_size_bytes..byte_vector_type_size {
+                byte_vector_elements.push(zero_byte);
+            }
+            bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &byte_vector_elements)
+        } else {
+            bx.context.new_bitcast(None, byte_vector, long_byte_vector_type)
+        };
+
+        // Step 3: Mask off the low and high nibbles of each byte in the byte-swapped input.
+        let masked_hi = (byte_vector >> four_vec) & mask;
+        let masked_lo = byte_vector & mask;
+
+        // Step 4: Shuffle the pre-reversed low and high-nibbles using the masked nibbles as a shuffle mask.
+        let hi = bx.context.new_rvalue_vector_perm(None, hi_nibble, hi_nibble, masked_lo);
+        let lo = bx.context.new_rvalue_vector_perm(None, lo_nibble, lo_nibble, masked_hi);
+
+        // Step 5: Combine the results of the shuffle back together and cast back to the original type.
+        let result = hi | lo;
+        let cast_ty =
+            bx.context.new_vector_type(elem_type, byte_vector_type_size / (elem_size_bytes as u64));
+
+        // we might need to truncate if sizeof(v_type) < sizeof(cast_ty)
+        if type_size_bytes < byte_vector_type_size {
+            let cast_result = bx.context.new_bitcast(None, result, cast_ty);
+            let elems: Vec<_> = (0..in_len)
+                .map(|i| {
+                    let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
+                    bx.extract_element(cast_result, idx)
+                })
+                .collect();
+            return Ok(bx.context.new_rvalue_from_vector(None, v_type, &elems));
+        }
+        // avoid the unnecessary truncation as an optimization.
+        return Ok(bx.context.new_bitcast(None, result, v_type));
+    }
+    // Since gcc doesn't have vector shuffle methods available in non-patched builds, fall back
+    // to component-wise bit reversal.
+    #[cfg(not(feature = "master"))]
+    if name == sym::simd_bitreverse {
+        let vector = args[0].immediate();
+        let vector_ty = vector.get_type();
+        let vector_type = vector_ty.unqualified().dyncast_vector().expect("vector type");
+        let num_elements = vector_type.get_num_units();
+
+        let elem_type = vector_type.get_element_type();
+        let elem_size_bytes = elem_type.get_size();
+        let num_type = elem_type.to_unsigned(bx.cx);
+        let new_elements: Vec<_> = (0..num_elements)
+            .map(|idx| {
+                let index = bx.context.new_rvalue_from_long(num_type, idx as _);
+                let extracted_value = bx.extract_element(vector, index).to_rvalue();
+                bx.bit_reverse(elem_size_bytes as u64 * 8, extracted_value)
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(None, vector_ty, &new_elements));
+    }
+
+    if name == sym::simd_ctlz || name == sym::simd_cttz {
+        let vector = args[0].immediate();
+        let elements: Vec<_> = (0..in_len)
+            .map(|i| {
+                let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
+                let value = bx.extract_element(vector, index).to_rvalue();
+                let value_type = value.get_type();
+                let element = if name == sym::simd_ctlz {
+                    bx.count_leading_zeroes(value_type.get_size() as u64 * 8, value)
+                } else {
+                    bx.count_trailing_zeroes(value_type.get_size() as u64 * 8, value)
+                };
+                bx.context.new_cast(None, element, value_type)
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(None, vector.get_type(), &elements));
+    }
+
+    if name == sym::simd_shuffle {
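+        // Conceptually, simd_shuffle(a, b, idx) concatenates `a` and `b` and picks lanes by
+        // index: e.g. (illustrative) a = <1, 2, 3, 4>, b = <5, 6, 7, 8>, idx = <0, 5, 2, 7>
+        // yields <1, 6, 3, 8>.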
+        // Make sure this is actually a SIMD vector.
+        let idx_ty = args[2].layout.ty;
+        let n: u64 = if idx_ty.is_simd()
+            && matches!(*idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
+        {
+            idx_ty.simd_size_and_type(bx.cx.tcx).0
+        } else {
+            return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
+        };
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let vector = args[2].immediate();
+
+        return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), vector));
+    }
+
+    #[cfg(feature = "master")]
+    if name == sym::simd_insert || name == sym::simd_insert_dyn {
+        require!(
+            in_elem == arg_tys[2],
+            InvalidMonomorphization::InsertedType {
+                span,
+                name,
+                in_elem,
+                in_ty,
+                out_ty: arg_tys[2]
+            }
+        );
+
+        // TODO(antoyo): For simd_insert, check if the index is a constant of the correct size.
+        let vector = args[0].immediate();
+        let index = args[1].immediate();
+        let value = args[2].immediate();
+        let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector");
+        bx.llbb().add_assignment(None, variable, vector);
+        let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index);
+        // TODO(antoyo): if simd_insert is constant, use BIT_REF.
+        bx.llbb().add_assignment(None, lvalue, value);
+        return Ok(variable.to_rvalue());
+    }
+
+    #[cfg(feature = "master")]
+    if name == sym::simd_extract || name == sym::simd_extract_dyn {
+        require!(
+            ret_ty == in_elem,
+            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+        );
+        // TODO(antoyo): For simd_extract, check if the index is a constant of the correct size.
+        let vector = args[0].immediate();
+        let index = args[1].immediate();
+        return Ok(bx.context.new_vector_access(None, vector, index).to_rvalue());
+    }
+
+    if name == sym::simd_select {
+        let m_elem_ty = in_elem;
+        let m_len = in_len;
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+        );
+        let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        require!(
+            m_len == v_len,
+            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
+        );
+        match *m_elem_ty.kind() {
+            ty::Int(_) => {}
+            _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
+        }
+        return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
+    }
+
+    if name == sym::simd_cast_ptr {
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+
+        match *in_elem.kind() {
+            ty::RawPtr(p_ty, _) => {
+                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
+                    bx.tcx.normalize_erasing_regions(ty::TypingEnv::fully_monomorphized(), ty)
+                });
+                require!(
+                    metadata.is_unit(),
+                    InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
+                );
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
+            }
+        }
+        match *out_elem.kind() {
+            ty::RawPtr(p_ty, _) => {
+                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
+                    bx.tcx.normalize_erasing_regions(ty::TypingEnv::fully_monomorphized(), ty)
+                });
+                require!(
+                    metadata.is_unit(),
+                    InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
+                );
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
+            }
+        }
+
+        let arg = args[0].immediate();
+        let elem_type = llret_ty.dyncast_vector().expect("vector return type").get_element_type();
+        let values: Vec<_> = (0..in_len)
+            .map(|i| {
+                let idx = bx.gcc_int(bx.usize_type, i as _);
+                let value = bx.extract_element(arg, idx);
+                bx.pointercast(value, elem_type)
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &values));
+    }
+
+    if name == sym::simd_expose_provenance {
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+
+        match *in_elem.kind() {
+            ty::RawPtr(_, _) => {}
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
+            }
+        }
+        match *out_elem.kind() {
+            ty::Uint(ty::UintTy::Usize) => {}
+            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
+        }
+
+        let arg = args[0].immediate();
+        let elem_type = llret_ty.dyncast_vector().expect("vector return type").get_element_type();
+        let values: Vec<_> = (0..in_len)
+            .map(|i| {
+                let idx = bx.gcc_int(bx.usize_type, i as _);
+                let value = bx.extract_element(arg, idx);
+                bx.ptrtoint(value, elem_type)
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &values));
+    }
+
+    if name == sym::simd_with_exposed_provenance {
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+
+        match *in_elem.kind() {
+            ty::Uint(ty::UintTy::Usize) => {}
+            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
+        }
+        match *out_elem.kind() {
+            ty::RawPtr(_, _) => {}
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
+            }
+        }
+
+        let arg = args[0].immediate();
+        let elem_type = llret_ty.dyncast_vector().expect("vector return type").get_element_type();
+        let values: Vec<_> = (0..in_len)
+            .map(|i| {
+                let idx = bx.gcc_int(bx.usize_type, i as _);
+                let value = bx.extract_element(arg, idx);
+                bx.inttoptr(value, elem_type)
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(bx.location, llret_ty, &values));
+    }
+
+    #[cfg(feature = "master")]
+    if name == sym::simd_cast || name == sym::simd_as {
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+        // casting cares about nominal type, not just structural type
+        if in_elem == out_elem {
+            return Ok(args[0].immediate());
+        }
+
+        enum Style {
+            Float,
+            Int,
+            Unsupported,
+        }
+
+        let in_style = match *in_elem.kind() {
+            ty::Int(_) | ty::Uint(_) => Style::Int,
+            ty::Float(_) => Style::Float,
+            _ => Style::Unsupported,
+        };
+
+        let out_style = match *out_elem.kind() {
+            ty::Int(_) | ty::Uint(_) => Style::Int,
+            ty::Float(_) => Style::Float,
+            _ => Style::Unsupported,
+        };
+
+        match (in_style, out_style) {
+            (Style::Unsupported, Style::Unsupported) => {
+                require!(
+                    false,
+                    InvalidMonomorphization::UnsupportedCast {
+                        span,
+                        name,
+                        in_ty,
+                        in_elem,
+                        ret_ty,
+                        out_elem
+                    }
+                );
+            }
+            _ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)),
+        }
+    }
+
+    macro_rules! arith_binary {
+        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+            $(if name == sym::$name {
+                match *in_elem.kind() {
+                    $($(ty::$p(_))|* => {
+                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+                    })*
+                    _ => {},
+                }
+                return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem })
+            })*
+        }
+    }
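+
+    // As an illustration, the `simd_add: Uint, Int => add, Float => fadd;` arm of the
+    // invocation further below expands to: if the intrinsic is `simd_add`, return
+    // `bx.add(..)` for integer elements, `bx.fadd(..)` for float elements, and emit an
+    // unsupported-operation error otherwise.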
+
+    if name == sym::simd_bitmask {
+        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+        // vector mask and returns the most significant bit (MSB) of each lane in the form
+        // of either:
+        // * an unsigned integer
+        // * an array of `u8`
+        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
+        //
+        // The bit order of the result depends on the byte endianness, LSB-first for little
+        // endian and MSB-first for big endian.
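+        //
+        // Illustrative example (not part of the original comment): for a 4-lane i32 mask
+        // <-1, 0, -1, -1>, the MSBs are 1, 0, 1, 1; with lane 0 in the least significant
+        // bit this gives the u8 value 0b0000_1101.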
+
+        let vector = args[0].immediate();
+        // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+        let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
+        let elem_type = vector_type.get_element_type();
+
+        let expected_int_bits = in_len.max(8);
+        let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+
+        // FIXME(antoyo): that's not going to work for masks bigger than 128 bits.
+        let result_type = bx.type_ix(expected_int_bits);
+        let mut result = bx.context.new_rvalue_zero(result_type);
+
+        let elem_size = elem_type.get_size() * 8;
+        let sign_shift = bx.context.new_rvalue_from_int(elem_type, elem_size as i32 - 1);
+        let one = bx.context.new_rvalue_one(elem_type);
+
+        for i in 0..in_len {
+            let elem =
+                bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32));
+            let shifted = elem >> sign_shift;
+            let masked = shifted & one;
+            result = result
+                | (bx.context.new_cast(None, masked, result_type)
+                    << bx.context.new_rvalue_from_int(result_type, i as i32));
+        }
+
+        match *ret_ty.kind() {
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
+                // The result already has the expected bitmask width, so return it directly:
+                return Ok(result);
+            }
+            ty::Array(elem, len)
+                if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
+                    && len
+                        .try_to_target_usize(bx.tcx)
+                        .expect("expected monomorphic const in codegen")
+                        == expected_bytes =>
+            {
+                // Zero-extend iN to the array length:
+                let ze = bx.zext(result, bx.type_ix(expected_bytes * 8));
+
+                // Convert the integer to a byte array
+                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
+                bx.store(ze, ptr, Align::ONE);
+                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
+                let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
+                return Ok(bx.load(array_ty, ptr, Align::ONE));
+            }
+            _ => return_error!(InvalidMonomorphization::CannotReturn {
+                span,
+                name,
+                ret_ty,
+                expected_int_bits,
+                expected_bytes
+            }),
+        }
+    }
+
+    fn simd_simple_float_intrinsic<'gcc, 'tcx>(
+        name: Symbol,
+        in_elem: Ty<'_>,
+        in_ty: Ty<'_>,
+        in_len: u64,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        span: Span,
+        args: &[OperandRef<'tcx, RValue<'gcc>>],
+    ) -> Result<RValue<'gcc>, ()> {
+        macro_rules! return_error {
+            ($err:expr) => {{
+                bx.tcx.dcx().emit_err($err);
+                return Err(());
+            }};
+        }
+        let (elem_ty_str, elem_ty, cast_type) = if let ty::Float(ref f) = *in_elem.kind() {
+            let elem_ty = bx.cx.type_float_from_ty(*f);
+            match f.bit_width() {
+                16 => ("", elem_ty, Some(bx.cx.double_type)),
+                32 => ("f", elem_ty, None),
+                64 => ("", elem_ty, None),
+                _ => {
+                    return_error!(InvalidMonomorphization::FloatingPointVector {
+                        span,
+                        name,
+                        f_ty: *f,
+                        in_ty
+                    });
+                }
+            }
+        } else {
+            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
+        };
+
+        let vec_ty = bx.cx.type_vector(elem_ty, in_len);
+
+        let intr_name = match name {
+            sym::simd_ceil => "ceil",
+            sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103
+            sym::simd_fcos => "cos",
+            sym::simd_fexp2 => "exp2",
+            sym::simd_fexp => "exp",
+            sym::simd_flog10 => "log10",
+            sym::simd_flog2 => "log2",
+            sym::simd_flog => "log",
+            sym::simd_floor => "floor",
+            sym::simd_fma => "fma",
+            sym::simd_relaxed_fma => "fma", // FIXME: this should relax to non-fused multiply-add when necessary
+            sym::simd_fsin => "sin",
+            sym::simd_fsqrt => "sqrt",
+            sym::simd_round => "round",
+            sym::simd_trunc => "trunc",
+            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
+        };
+        let builtin_name = format!("{}{}", intr_name, elem_ty_str);
+        let function = bx.context.get_builtin_function(builtin_name);
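+        // For instance (illustrative): `simd_fsqrt` on an f32 vector resolves to the C
+        // builtin `sqrtf` and on f64 to `sqrt`; f16 reuses the f64 builtin, casting through
+        // `cast_type`.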
+
+        // TODO(antoyo): add platform-specific behavior here for architectures that have these
+        // intrinsics as instructions (for instance, GPUs)
+        let mut vector_elements = vec![];
+        for i in 0..in_len {
+            let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64);
+            let mut arguments = vec![];
+            for arg in args {
+                let mut element = bx.extract_element(arg.immediate(), index).to_rvalue();
+                // FIXME: it would probably be better to not have casts here and use the proper
+                // instructions.
+                if let Some(typ) = cast_type {
+                    element = bx.context.new_cast(None, element, typ);
+                }
+                arguments.push(element);
+            }
+            let mut result = bx.context.new_call(None, function, &arguments);
+            if cast_type.is_some() {
+                result = bx.context.new_cast(None, result, elem_ty);
+            }
+            vector_elements.push(result);
+        }
+        let c = bx.context.new_rvalue_from_vector(None, vec_ty, &vector_elements);
+        Ok(c)
+    }
+
+    if std::matches!(
+        name,
+        sym::simd_ceil
+            | sym::simd_fabs
+            | sym::simd_fcos
+            | sym::simd_fexp2
+            | sym::simd_fexp
+            | sym::simd_flog10
+            | sym::simd_flog2
+            | sym::simd_flog
+            | sym::simd_floor
+            | sym::simd_fma
+            | sym::simd_relaxed_fma
+            | sym::simd_fsin
+            | sym::simd_fsqrt
+            | sym::simd_round
+            | sym::simd_trunc
+    ) {
+        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    #[cfg(feature = "master")]
+    fn vector_ty<'gcc, 'tcx>(
+        cx: &CodegenCx<'gcc, 'tcx>,
+        elem_ty: Ty<'tcx>,
+        vec_len: u64,
+    ) -> Type<'gcc> {
+        // FIXME: use cx.layout_of(ty).llvm_type() ?
+        let elem_ty = match *elem_ty.kind() {
+            ty::Int(v) => cx.type_int_from_ty(v),
+            ty::Uint(v) => cx.type_uint_from_ty(v),
+            ty::Float(v) => cx.type_float_from_ty(v),
+            _ => unreachable!(),
+        };
+        cx.type_vector(elem_ty, vec_len)
+    }
+
+    #[cfg(feature = "master")]
+    fn gather<'a, 'gcc, 'tcx>(
+        default: RValue<'gcc>,
+        pointers: RValue<'gcc>,
+        mask: RValue<'gcc>,
+        bx: &mut Builder<'a, 'gcc, 'tcx>,
+        in_len: u64,
+        invert: bool,
+    ) -> RValue<'gcc> {
+        let vector_type = default.get_type();
+        let elem_type =
+            vector_type.unqualified().dyncast_vector().expect("vector type").get_element_type();
+
+        let mut values = Vec::with_capacity(in_len as usize);
+        for i in 0..in_len {
+            let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
+            let int = bx.context.new_vector_access(None, pointers, index).to_rvalue();
+
+            let ptr_type = elem_type.make_pointer();
+            let ptr = bx.context.new_bitcast(None, int, ptr_type);
+            let value = ptr.dereference(None).to_rvalue();
+            values.push(value);
+        }
+
+        let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values);
+
+        let mut mask_types = Vec::with_capacity(in_len as usize);
+        let mut mask_values = Vec::with_capacity(in_len as usize);
+        for i in 0..in_len {
+            let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
+            mask_types.push(bx.context.new_field(None, bx.i32_type, "m"));
+            let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue();
+            let mask_value_cast = bx.context.new_cast(None, mask_value, bx.i32_type);
+            let masked =
+                bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value_cast;
+            let value = index + masked;
+            mask_values.push(value);
+        }
+        let mask_type = bx.context.new_struct_type(None, "mask_type", &mask_types);
+        let mask = bx.context.new_struct_constructor(None, mask_type.as_type(), None, &mask_values);
+
+        if invert {
+            bx.shuffle_vector(vector, default, mask)
+        } else {
+            bx.shuffle_vector(default, vector, mask)
+        }
+    }
+
+    #[cfg(feature = "master")]
+    if name == sym::simd_gather {
+        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
+        //             mask: <N x i{M}>) -> <N x T>
+        // * N: number of elements in the input vectors
+        // * T: type of the element to load
+        // * M: any integer width is supported, will be truncated to i1
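+        //
+        // Illustrative example (not part of the original comment): with values = <1, 2, 3, 4>,
+        // pointers to memory holding <10, 20, 30, 40>, and mask = <-1, 0, -1, 0>, the result
+        // is <10, 2, 30, 4>: loaded through the pointer where the mask is set, the
+        // corresponding lane of `values` otherwise.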
+
+        // All types must be simd vector types
+        require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+        );
+        require_simd!(
+            arg_tys[2],
+            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+        );
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+
+        // Of the same length:
+        let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::SecondArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[1],
+                out_len
+            }
+        );
+        require!(
+            in_len == out_len2,
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[2],
+                out_len: out_len2
+            }
+        );
+
+        // The return type must match the first argument type
+        require!(
+            ret_ty == in_ty,
+            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
+        );
+
+        // Counts how many levels of pointer indirection a type has.
+        fn ptr_count(t: Ty<'_>) -> usize {
+            match *t.kind() {
+                ty::RawPtr(p_ty, _) => 1 + ptr_count(p_ty),
+                _ => 0,
+            }
+        }
+
+        // Non-ptr type
+        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+            match *t.kind() {
+                ty::RawPtr(p_ty, _) => non_ptr(p_ty),
+                _ => t,
+            }
+        }
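+        // For example (hypothetical types): for element_ty1 = *const *const f32, ptr_count
+        // returns 2 and non_ptr returns f32.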
+
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
+        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (pointer_count, underlying_ty) = match *element_ty1.kind() {
+            ty::RawPtr(p_ty, _) if p_ty == in_elem => {
+                (ptr_count(element_ty1), non_ptr(element_ty1))
+            }
+            _ => {
+                require!(
+                    false,
+                    InvalidMonomorphization::ExpectedElementType {
+                        span,
+                        name,
+                        expected_element: element_ty1,
+                        second_arg: arg_tys[1],
+                        in_elem,
+                        in_ty,
+                        mutability: ExpectedPointerMutability::Not,
+                    }
+                );
+                unreachable!();
+            }
+        };
+        assert!(pointer_count > 0);
+        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
+        assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+        // The element type of the third argument must be a signed integer type of any width:
+        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+        match *element_ty2.kind() {
+            ty::Int(_) => (),
+            _ => {
+                require!(
+                    false,
+                    InvalidMonomorphization::ThirdArgElementType {
+                        span,
+                        name,
+                        expected_element: element_ty2,
+                        third_arg: arg_tys[2]
+                    }
+                );
+            }
+        }
+
+        return Ok(gather(
+            args[0].immediate(),
+            args[1].immediate(),
+            args[2].immediate(),
+            bx,
+            in_len,
+            false,
+        ));
+    }
+
+    #[cfg(feature = "master")]
+    if name == sym::simd_scatter {
+        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
+        //             mask: <N x i{M}>) -> ()
+        // * N: number of elements in the input vectors
+        // * T: type of the element to store
+        // * M: any integer width is supported, will be truncated to i1
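+        //
+        // Illustrative example (not part of the original comment): with values = <1, 2, 3, 4>,
+        // pointers into memory holding <10, 20, 30, 40>, and mask = <-1, 0, -1, 0>, the
+        // pointed-to memory afterwards holds <1, 20, 3, 40>: written where the mask is set,
+        // unchanged otherwise.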
+
+        // All types must be simd vector types
+        require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
+        require_simd!(
+            arg_tys[1],
+            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+        );
+        require_simd!(
+            arg_tys[2],
+            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+        );
+
+        // Of the same length:
+        let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+        require!(
+            in_len == element_len1,
+            InvalidMonomorphization::SecondArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[1],
+                out_len: element_len1
+            }
+        );
+        require!(
+            in_len == element_len2,
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[2],
+                out_len: element_len2
+            }
+        );
+
+        // Counts how many levels of pointer indirection a type has.
+        fn ptr_count(t: Ty<'_>) -> usize {
+            match *t.kind() {
+                ty::RawPtr(p_ty, _) => 1 + ptr_count(p_ty),
+                _ => 0,
+            }
+        }
+
+        // Non-ptr type
+        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+            match *t.kind() {
+                ty::RawPtr(p_ty, _) => non_ptr(p_ty),
+                _ => t,
+            }
+        }
+
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
+        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+        let (pointer_count, underlying_ty) = match *element_ty1.kind() {
+            ty::RawPtr(p_ty, mutbl) if p_ty == in_elem && mutbl == hir::Mutability::Mut => {
+                (ptr_count(element_ty1), non_ptr(element_ty1))
+            }
+            _ => {
+                require!(
+                    false,
+                    InvalidMonomorphization::ExpectedElementType {
+                        span,
+                        name,
+                        expected_element: element_ty1,
+                        second_arg: arg_tys[1],
+                        in_elem,
+                        in_ty,
+                        mutability: ExpectedPointerMutability::Mut,
+                    }
+                );
+                unreachable!();
+            }
+        };
+        assert!(pointer_count > 0);
+        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
+        assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+        // The element type of the third argument must be a signed integer type of any width:
+        match *element_ty2.kind() {
+            ty::Int(_) => (),
+            _ => {
+                require!(
+                    false,
+                    InvalidMonomorphization::ThirdArgElementType {
+                        span,
+                        name,
+                        expected_element: element_ty2,
+                        third_arg: arg_tys[2]
+                    }
+                );
+            }
+        }
+
+        let result =
+            gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), bx, in_len, true);
+
+        let pointers = args[1].immediate();
+
+        let vector_type = if pointer_count > 1 {
+            bx.context.new_vector_type(bx.usize_type, in_len)
+        } else {
+            vector_ty(bx, underlying_ty, in_len)
+        };
+        let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type();
+
+        for i in 0..in_len {
+            let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
+            let value = bx.context.new_vector_access(None, result, index);
+
+            let int = bx.context.new_vector_access(None, pointers, index).to_rvalue();
+            let ptr_type = elem_type.make_pointer();
+            let ptr = bx.context.new_bitcast(None, int, ptr_type);
+            bx.llbb().add_assignment(None, ptr.dereference(None), value);
+        }
+
+        return Ok(bx.context.new_rvalue_zero(bx.i32_type));
+    }
+
+    arith_binary! {
+        simd_add: Uint, Int => add, Float => fadd;
+        simd_sub: Uint, Int => sub, Float => fsub;
+        simd_mul: Uint, Int => mul, Float => fmul;
+        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+        simd_rem: Uint => urem, Int => srem, Float => frem;
+        simd_shl: Uint, Int => shl;
+        simd_shr: Uint => lshr, Int => ashr;
+        simd_and: Uint, Int => and;
+        simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
+        simd_xor: Uint, Int => xor;
+        simd_fmin: Float => vector_fmin;
+        simd_fmax: Float => vector_fmax;
+    }
+
+    macro_rules! arith_unary {
+        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+            $(if name == sym::$name {
+                match *in_elem.kind() {
+                    $($(ty::$p(_))|* => {
+                        return Ok(bx.$call(args[0].immediate()))
+                    })*
+                    _ => {},
+                }
+                return_error!(InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem })
+            })*
+        }
+    }
+
+    arith_unary! {
+        simd_neg: Int => neg, Float => fneg;
+    }
+
+    #[cfg(feature = "master")]
+    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+        let lhs = args[0].immediate();
+        let rhs = args[1].immediate();
+        let is_add = name == sym::simd_saturating_add;
+        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+        let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+            ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)),
+            ty::Uint(i) => {
+                (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i))
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
+                    span,
+                    name,
+                    expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
+                    vector_type: arg_tys[0],
+                });
+            }
+        };
+
+        let result = match (signed, is_add) {
+            (false, true) => {
+                let res = lhs + rhs;
+                let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
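+                // GCC vector comparisons yield all-ones lanes where true, so the OR below
+                // saturates exactly the lanes that wrapped around, to the maximum value.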
+                res | cmp
+            }
+            (true, true) => {
+                // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
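+                // Our reading of that algorithm: a lane overflowed iff lhs and rhs share a
+                // sign but the sum does not, i.e. `!(lhs ^ rhs) & (lhs ^ sum)` has its sign
+                // bit set; arithmetic-shifting that right by (bits - 1) gives an all-ones
+                // mask for overflowing lanes, which then selects INT_MIN or INT_MAX (built
+                // in `mask_min`) instead of the wrapped sum.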
+                // TODO(antoyo): improve using conditional operators if possible.
+                // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+                let arg_type = lhs.get_type().unqualified();
+                // TODO(antoyo): convert lhs and rhs to unsigned.
+                let sum = lhs + rhs;
+                let vector_type = arg_type.dyncast_vector().expect("vector type");
+                let unit = vector_type.get_num_units();
+                let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
+                let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
+
+                let xor1 = lhs ^ rhs;
+                let xor2 = lhs ^ sum;
+                let and =
+                    bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
+                let mask = and >> width;
+
+                let one = bx.context.new_rvalue_one(elem_ty);
+                let ones =
+                    bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
+                let shift1 = ones << width;
+                let shift2 = sum >> width;
+                let mask_min = shift1 ^ shift2;
+
+                let and1 =
+                    bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
+                let and2 = mask & mask_min;
+
+                and1 + and2
+            }
+            (false, false) => {
+                let res = lhs - rhs;
+                let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
+                res & cmp
+            }
+            (true, false) => {
+                // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+                let arg_type = lhs.get_type().unqualified();
+                // TODO(antoyo): this uses the same algorithm as saturating add, but adds the
+                // negative of the right operand. Find a proper subtraction algorithm.
+                let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
+
+                // TODO(antoyo): convert lhs and rhs to unsigned.
+                let sum = lhs + rhs;
+                let vector_type = arg_type.dyncast_vector().expect("vector type");
+                let unit = vector_type.get_num_units();
+                let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1);
+                let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]);
+
+                let xor1 = lhs ^ rhs;
+                let xor2 = lhs ^ sum;
+                let and =
+                    bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2;
+                let mask = and >> width;
+
+                let one = bx.context.new_rvalue_one(elem_ty);
+                let ones =
+                    bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]);
+                let shift1 = ones << width;
+                let shift2 = sum >> width;
+                let mask_min = shift1 ^ shift2;
+
+                let and1 =
+                    bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum;
+                let and2 = mask & mask_min;
+
+                and1 + and2
+            }
+        };
+
+        return Ok(result);
+    }
+
+    macro_rules! arith_red {
+        ($name:ident : $vec_op:expr, $float_reduce:ident, $ordered:expr, $op:ident,
+         $identity:expr) => {
+            if name == sym::$name {
+                require!(
+                    ret_ty == in_elem,
+                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+                );
+                return match *in_elem.kind() {
+                    ty::Int(_) | ty::Uint(_) => {
+                        let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
+                        if $ordered {
+                            // if overflow occurs, the result is the
+                            // mathematical result modulo 2^n:
+                            Ok(bx.$op(args[1].immediate(), r))
+                        } else {
+                            Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+                        }
+                    }
+                    ty::Float(_) => {
+                        if $ordered {
+                            // ordered arithmetic reductions take an accumulator
+                            let acc = args[1].immediate();
+                            Ok(bx.$float_reduce(acc, args[0].immediate()))
+                        } else {
+                            Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+                        }
+                    }
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
+                        in_ty,
+                        in_elem,
+                        ret_ty
+                    }),
+                };
+            }
+        };
+    }
+
+    arith_red!(
+        simd_reduce_add_unordered: BinaryOp::Plus,
+        vector_reduce_fadd_reassoc,
+        false,
+        add,
+        0.0 // TODO: Use this argument.
+    );
+    arith_red!(
+        simd_reduce_mul_unordered: BinaryOp::Mult,
+        vector_reduce_fmul_reassoc,
+        false,
+        mul,
+        1.0
+    );
+    arith_red!(
+        simd_reduce_add_ordered: BinaryOp::Plus,
+        vector_reduce_fadd,
+        true,
+        add,
+        0.0
+    );
+    arith_red!(
+        simd_reduce_mul_ordered: BinaryOp::Mult,
+        vector_reduce_fmul,
+        true,
+        mul,
+        1.0
+    );
+
+    macro_rules! minmax_red {
+        ($name:ident: $int_red:ident, $float_red:ident) => {
+            if name == sym::$name {
+                require!(
+                    ret_ty == in_elem,
+                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+                );
+                return match *in_elem.kind() {
+                    ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())),
+                    ty::Float(_) => Ok(bx.$float_red(args[0].immediate())),
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
+                        in_ty,
+                        in_elem,
+                        ret_ty
+                    }),
+                };
+            }
+        };
+    }
+
+    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
+    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
+
+    macro_rules! bitwise_red {
+        ($name:ident : $op:expr, $boolean:expr) => {
+            if name == sym::$name {
+                let input = if !$boolean {
+                    require!(
+                        ret_ty == in_elem,
+                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+                    );
+                    args[0].immediate()
+                } else {
+                    match *in_elem.kind() {
+                        ty::Int(_) | ty::Uint(_) => {}
+                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                            span,
+                            name,
+                            symbol: sym::$name,
+                            in_ty,
+                            in_elem,
+                            ret_ty
+                        }),
+                    }
+
+                    args[0].immediate()
+                };
+                return match *in_elem.kind() {
+                    ty::Int(_) | ty::Uint(_) => {
+                        let r = bx.vector_reduce_op(input, $op);
+                        Ok(if !$boolean {
+                            r
+                        } else {
+                            bx.icmp(
+                                IntPredicate::IntNE,
+                                r,
+                                bx.context.new_rvalue_zero(r.get_type()),
+                            )
+                        })
+                    }
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
+                        in_ty,
+                        in_elem,
+                        ret_ty
+                    }),
+                };
+            }
+        };
+    }
+
+    bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false);
+    bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false);
+    bitwise_red!(simd_reduce_xor: BinaryOp::BitwiseXor, false);
+    bitwise_red!(simd_reduce_all: BinaryOp::BitwiseAnd, true);
+    bitwise_red!(simd_reduce_any: BinaryOp::BitwiseOr, true);
+
+    unimplemented!("simd {}", name);
+}
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
new file mode 100644
index 00000000000..624fdb4043c
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -0,0 +1,527 @@
+/*
+ * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
+ * TODO(antoyo): support #[inline] attributes.
+ * TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html).
+ * For Thin LTO, this might be helpful:
+ * In gcc 4.6 -fwhopr was removed and became default with -flto. The non-whopr path can still be executed via -flto-partition=none.
+ * Or the new incremental LTO (https://www.phoronix.com/news/GCC-Incremental-LTO-Patches)?
+ *
+ * Maybe some missing optimizations enabled by rustc's LTO are in there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
+ * Like -fipa-icf (which should already be enabled) and maybe -fdevirtualize-at-ltrans.
+ * TODO: stop always emitting debug info. Perhaps it slows things down?
+ *
+ * TODO(antoyo): remove the patches.
+ */
+
+#![allow(internal_features)]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
+#![feature(rustc_private, decl_macro, never_type, trusted_len, let_chains)]
+#![allow(broken_intra_doc_links)]
+#![recursion_limit = "256"]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![deny(clippy::pattern_type_mismatch)]
+#![allow(clippy::needless_lifetimes)]
+
+// Some "regular" crates we want to share with rustc
+extern crate object;
+extern crate smallvec;
+// FIXME(antoyo): clippy bug: remove the #[allow] when it's fixed.
+#[allow(unused_extern_crates)]
+extern crate tempfile;
+#[macro_use]
+extern crate tracing;
+
+// The rustc crates we need
+extern crate rustc_abi;
+extern crate rustc_apfloat;
+extern crate rustc_ast;
+extern crate rustc_attr_parsing;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fluent_macro;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_index;
+#[cfg(feature = "master")]
+extern crate rustc_interface;
+extern crate rustc_macros;
+extern crate rustc_metadata;
+extern crate rustc_middle;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_symbol_mangling;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+mod abi;
+mod allocator;
+mod asm;
+mod attributes;
+mod back;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod errors;
+mod gcc_util;
+mod int;
+mod intrinsic;
+mod mono_item;
+mod type_;
+mod type_of;
+
+use std::any::Any;
+use std::fmt::Debug;
+use std::ops::Deref;
+#[cfg(not(feature = "master"))]
+use std::sync::atomic::AtomicBool;
+#[cfg(not(feature = "master"))]
+use std::sync::atomic::Ordering;
+use std::sync::{Arc, Mutex};
+
+use back::lto::{ThinBuffer, ThinData};
+use gccjit::{CType, Context, OptimizationLevel};
+#[cfg(feature = "master")]
+use gccjit::{TargetInfo, Version};
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::back::write::{
+    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::base::codegen_crate;
+use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, WriteBackendMethods};
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::sync::IntoDynSyncSend;
+use rustc_errors::DiagCtxtHandle;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
+use rustc_session::Session;
+use rustc_session::config::{OptLevel, OutputFilenames};
+use rustc_span::Symbol;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::RelocModel;
+use tempfile::TempDir;
+
+use crate::back::lto::ModuleBuffer;
+use crate::gcc_util::target_cpu;
+
+rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
+
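+/// Prints the result of the wrapped closure when dropped while the thread is panicking; useful
+/// to dump extra context when debugging a codegen crash.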
+pub struct PrintOnPanic<F: Fn() -> String>(pub F);
+
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+    fn drop(&mut self) {
+        if ::std::thread::panicking() {
+            println!("{}", (self.0)());
+        }
+    }
+}
+
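+/// Minimal stand-in for gccjit's `TargetInfo`, used when compiling against a libgccjit that
+/// does not provide the target-info API (i.e. without the "master" feature).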
+#[cfg(not(feature = "master"))]
+#[derive(Debug)]
+pub struct TargetInfo {
+    supports_128bit_integers: AtomicBool,
+}
+
+#[cfg(not(feature = "master"))]
+impl TargetInfo {
+    fn cpu_supports(&self, _feature: &str) -> bool {
+        false
+    }
+
+    fn supports_target_dependent_type(&self, typ: CType) -> bool {
+        match typ {
+            CType::UInt128t | CType::Int128t => {
+                if self.supports_128bit_integers.load(Ordering::SeqCst) {
+                    return true;
+                }
+            }
+            _ => (),
+        }
+        false
+    }
+}
+
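+/// Thread-safe, clonable wrapper around `TargetInfo`, shared between the backend and the
+/// codegen contexts.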
+#[derive(Clone)]
+pub struct LockedTargetInfo {
+    info: Arc<Mutex<IntoDynSyncSend<TargetInfo>>>,
+}
+
+impl Debug for LockedTargetInfo {
+    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.info.lock().expect("lock").fmt(formatter)
+    }
+}
+
+impl LockedTargetInfo {
+    fn cpu_supports(&self, feature: &str) -> bool {
+        self.info.lock().expect("lock").cpu_supports(feature)
+    }
+
+    fn supports_target_dependent_type(&self, typ: CType) -> bool {
+        self.info.lock().expect("lock").supports_target_dependent_type(typ)
+    }
+}
+
+#[derive(Clone)]
+pub struct GccCodegenBackend {
+    target_info: LockedTargetInfo,
+}
+
+impl CodegenBackend for GccCodegenBackend {
+    fn locale_resource(&self) -> &'static str {
+        crate::DEFAULT_LOCALE_RESOURCE
+    }
+
+    fn init(&self, _sess: &Session) {
+        #[cfg(feature = "master")]
+        {
+            let target_cpu = target_cpu(_sess);
+
+            // Get the second TargetInfo with the correct CPU features by setting the arch.
+            let context = Context::default();
+            if target_cpu != "generic" {
+                context.add_command_line_option(format!("-march={}", target_cpu));
+            }
+
+            **self.target_info.info.lock().expect("lock") = context.get_target_info();
+        }
+
+        #[cfg(feature = "master")]
+        gccjit::set_global_personality_function_name(b"rust_eh_personality\0");
+
+        #[cfg(not(feature = "master"))]
+        {
+            let temp_dir = TempDir::new().expect("cannot create temporary directory");
+            let temp_file = temp_dir.into_path().join("result.asm");
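+            // Detect 128-bit integer support: compile a dummy context that merely mentions the
+            // type and check whether libgccjit reports an error.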
+            let check_context = Context::default();
+            check_context.set_print_errors_to_stderr(false);
+            let _int128_ty = check_context.new_c_type(CType::UInt128t);
+            // NOTE: we cannot just call compile() as this would require files other than libgccjit.so.
+            check_context.compile_to_file(
+                gccjit::OutputKind::Assembler,
+                temp_file.to_str().expect("path to str"),
+            );
+            self.target_info
+                .info
+                .lock()
+                .expect("lock")
+                .supports_128bit_integers
+                .store(check_context.get_last_error() == Ok(None), Ordering::SeqCst);
+        }
+    }
+
+    fn provide(&self, providers: &mut Providers) {
+        providers.global_backend_features = |tcx, ()| gcc_util::global_gcc_features(tcx.sess, true)
+    }
+
+    fn codegen_crate(
+        &self,
+        tcx: TyCtxt<'_>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any> {
+        let target_cpu = target_cpu(tcx.sess);
+        let res = codegen_crate(
+            self.clone(),
+            tcx,
+            target_cpu.to_string(),
+            metadata,
+            need_metadata_module,
+        );
+
+        Box::new(res)
+    }
+
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        sess: &Session,
+        _outputs: &OutputFilenames,
+    ) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
+        ongoing_codegen
+            .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
+            .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
+            .join(sess)
+    }
+
+    fn target_features_cfg(&self, sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) {
+        target_features_cfg(sess, &self.target_info)
+    }
+}
+
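+/// Create a libgccjit context configured for codegen: Intel assembly syntax on x86, an output
+/// ident recording the rustc and libgccjit versions (with the "master" feature), and no
+/// asynchronous unwind tables.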
+fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
+    let context = Context::default();
+    if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
+        context.add_command_line_option("-masm=intel");
+    }
+    #[cfg(feature = "master")]
+    {
+        context.set_special_chars_allowed_in_func_names("$.*");
+        let version = Version::get();
+        let version = format!("{}.{}.{}", version.major, version.minor, version.patch);
+        context.set_output_ident(&format!(
+            "rustc version {} with libgccjit {}",
+            rustc_interface::util::rustc_version_str().unwrap_or("unknown version"),
+            version,
+        ));
+    }
+    // TODO(antoyo): check if this should only be added when using -Cforce-unwind-tables=n.
+    context.add_command_line_option("-fno-asynchronous-unwind-tables");
+    context
+}
+
+impl ExtraBackendMethods for GccCodegenBackend {
+    fn codegen_allocator(
+        &self,
+        tcx: TyCtxt<'_>,
+        module_name: &str,
+        kind: AllocatorKind,
+        alloc_error_handler_kind: AllocatorKind,
+    ) -> Self::Module {
+        let mut mods = GccContext {
+            context: Arc::new(SyncContext::new(new_context(tcx))),
+            relocation_model: tcx.sess.relocation_model(),
+            should_combine_object_files: false,
+            temp_dir: None,
+        };
+
+        unsafe {
+            allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind);
+        }
+        mods
+    }
+
+    fn compile_codegen_unit(
+        &self,
+        tcx: TyCtxt<'_>,
+        cgu_name: Symbol,
+    ) -> (ModuleCodegen<Self::Module>, u64) {
+        base::compile_codegen_unit(tcx, cgu_name, self.target_info.clone())
+    }
+
+    fn target_machine_factory(
+        &self,
+        _sess: &Session,
+        _opt_level: OptLevel,
+        _features: &[String],
+    ) -> TargetMachineFactoryFn<Self> {
+        // TODO(antoyo): set opt level.
+        Arc::new(|_| Ok(()))
+    }
+}
+
+pub struct GccContext {
+    context: Arc<SyncContext>,
+    /// This field is needed to set the -fPIC flag when necessary during LTO.
+    relocation_model: RelocModel,
+    should_combine_object_files: bool,
+    // Temporary directory used by LTO. We keep it here so that it's not removed before linking.
+    temp_dir: Option<TempDir>,
+}
+
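+/// Wrapper that unsafely marks a libgccjit `Context` as `Send` and `Sync` so that modules can
+/// flow through rustc's codegen infrastructure (see the FIXME below).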
+struct SyncContext {
+    context: Context<'static>,
+}
+
+impl SyncContext {
+    fn new(context: Context<'static>) -> Self {
+        Self { context }
+    }
+}
+
+impl Deref for SyncContext {
+    type Target = Context<'static>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.context
+    }
+}
+
+unsafe impl Send for SyncContext {}
+// FIXME(antoyo): this shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm".
+// TODO: disable it here by returning false in CodegenBackend::supports_parallel().
+unsafe impl Sync for SyncContext {}
+
+impl WriteBackendMethods for GccCodegenBackend {
+    type Module = GccContext;
+    type TargetMachine = ();
+    type TargetMachineError = ();
+    type ModuleBuffer = ModuleBuffer;
+    type ThinData = ThinData;
+    type ThinBuffer = ThinBuffer;
+
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<FatLtoInput<Self>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<LtoModuleCodegen<Self>, FatalError> {
+        back::lto::run_fat(cgcx, modules, cached_modules)
+    }
+
+    fn run_thin_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<(String, Self::ThinBuffer)>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+        back::lto::run_thin(cgcx, modules, cached_modules)
+    }
+
+    fn print_pass_timings(&self) {
+        unimplemented!();
+    }
+
+    fn print_statistics(&self) {
+        unimplemented!()
+    }
+
+    unsafe fn optimize(
+        _cgcx: &CodegenContext<Self>,
+        _dcx: DiagCtxtHandle<'_>,
+        module: &mut ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<(), FatalError> {
+        module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+        Ok(())
+    }
+
+    fn optimize_fat(
+        _cgcx: &CodegenContext<Self>,
+        _module: &mut ModuleCodegen<Self::Module>,
+    ) -> Result<(), FatalError> {
+        // TODO(antoyo)
+        Ok(())
+    }
+
+    unsafe fn optimize_thin(
+        cgcx: &CodegenContext<Self>,
+        thin: ThinModule<Self>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::lto::optimize_thin_module(thin, cgcx)
+    }
+
+    unsafe fn codegen(
+        cgcx: &CodegenContext<Self>,
+        dcx: DiagCtxtHandle<'_>,
+        module: ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<CompiledModule, FatalError> {
+        back::write::codegen(cgcx, dcx, module, config)
+    }
+
+    fn prepare_thin(
+        module: ModuleCodegen<Self::Module>,
+        emit_summary: bool,
+    ) -> (String, Self::ThinBuffer) {
+        back::lto::prepare_thin(module, emit_summary)
+    }
+
+    fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+        unimplemented!();
+    }
+
+    fn run_link(
+        cgcx: &CodegenContext<Self>,
+        dcx: DiagCtxtHandle<'_>,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::write::link(cgcx, dcx, modules)
+    }
+    fn autodiff(
+        _cgcx: &CodegenContext<Self>,
+        _module: &ModuleCodegen<Self::Module>,
+        _diff_fncs: Vec<AutoDiffItem>,
+        _config: &ModuleConfig,
+    ) -> Result<(), FatalError> {
+        unimplemented!()
+    }
+}
+
+/// This is the entry point for a hot-plugged rustc_codegen_gccjit.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+    #[cfg(feature = "master")]
+    let info = {
+        // Check whether the target supports 128-bit integers and sized floating-point types
+        // (like Float16).
+        let context = Context::default();
+        Arc::new(Mutex::new(IntoDynSyncSend(context.get_target_info())))
+    };
+    #[cfg(not(feature = "master"))]
+    let info = Arc::new(Mutex::new(IntoDynSyncSend(TargetInfo {
+        supports_128bit_integers: AtomicBool::new(false),
+    })));
+
+    Box::new(GccCodegenBackend { target_info: LockedTargetInfo { info } })
+}
+
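+/// Map rustc's optimization levels to libgccjit's. libgccjit only exposes levels 0 to 3, so the
+/// size-oriented levels fall back to `OptimizationLevel::Limited`.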
+fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
+    match optlevel {
+        None => OptimizationLevel::None,
+        Some(level) => match level {
+            OptLevel::No => OptimizationLevel::None,
+            OptLevel::Less => OptimizationLevel::Limited,
+            OptLevel::More => OptimizationLevel::Standard,
+            OptLevel::Aggressive => OptimizationLevel::Aggressive,
+            OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
+        },
+    }
+}
+
+/// Returns the features that should be set in `cfg(target_feature)`.
+fn target_features_cfg(
+    sess: &Session,
+    target_info: &LockedTargetInfo,
+) -> (Vec<Symbol>, Vec<Symbol>) {
+    // TODO(antoyo): use global_gcc_features.
+    let f = |allow_unstable| {
+        sess.target
+            .rust_target_features()
+            .iter()
+            .filter_map(|&(feature, gate, _)| {
+                if allow_unstable
+                    || (gate.in_cfg()
+                        && (sess.is_nightly_build() || gate.requires_nightly().is_none()))
+                {
+                    Some(feature)
+                } else {
+                    None
+                }
+            })
+            .filter(|feature| {
+                // TODO: we disable Neon for now since we don't support the LLVM intrinsics for it.
+                if *feature == "neon" {
+                    return false;
+                }
+                target_info.cpu_supports(feature)
+                /*
+                  adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma,
+                  avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq,
+                  bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
+                  sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves
+                */
+            })
+            .map(Symbol::intern)
+            .collect()
+    };
+
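+    // Compute the stable set first (allow_unstable == false), then the full set that also
+    // includes unstable features.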
+    let target_features = f(false);
+    let unstable_target_features = f(true);
+    (target_features, unstable_target_features)
+}
diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs
new file mode 100644
index 00000000000..a2df7b2596f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/mono_item.rs
@@ -0,0 +1,79 @@
+#[cfg(feature = "master")]
+use gccjit::{FnAttribute, VarAttribute};
+use rustc_codegen_ssa::traits::PreDefineCodegenMethods;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_middle::bug;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::layout::{FnAbiOf, HasTypingEnv, LayoutOf};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
+
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::{attributes, base};
+
+impl<'gcc, 'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))]
+    fn predefine_static(
+        &self,
+        def_id: DefId,
+        _linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
+        let attrs = self.tcx.codegen_fn_attrs(def_id);
+        let instance = Instance::mono(self.tcx, def_id);
+        let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
+        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
+        // the gcc type from the actual evaluated initializer.
+        let ty =
+            if nested { self.tcx.types.unit } else { instance.ty(self.tcx, self.typing_env()) };
+        let gcc_type = self.layout_of(ty).gcc_type(self);
+
+        let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+        let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section);
+        #[cfg(feature = "master")]
+        global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility)));
+
+        // TODO(antoyo): set linkage.
+        self.instances.borrow_mut().insert(instance, global);
+    }
+
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))]
+    fn predefine_fn(
+        &self,
+        instance: Instance<'tcx>,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
+        assert!(!instance.args.has_infer());
+
+        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+        self.linkage.set(base::linkage_to_gcc(linkage));
+        let decl = self.declare_fn(symbol_name, fn_abi);
+        //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+
+        attributes::from_fn_attrs(self, decl, instance);
+
+        // If we're compiling the compiler-builtins crate, e.g., the equivalent of
+        // compiler-rt, then we want to implicitly compile everything with hidden
+        // visibility as we're going to link this object all over the place but
+        // don't want the symbols to get exported.
+        if linkage != Linkage::Internal && self.tcx.is_compiler_builtins(LOCAL_CRATE) {
+            #[cfg(feature = "master")]
+            decl.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
+        } else {
+            #[cfg(feature = "master")]
+            decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility)));
+        }
+
+        // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
+        // TODO(antoyo): set unique comdat.
+        // TODO(antoyo): use inline attribute from there in linkage.set() above.
+
+        self.functions.borrow_mut().insert(symbol_name.to_string(), decl);
+        self.function_instances.borrow_mut().insert(instance, decl);
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
new file mode 100644
index 00000000000..4e0a250b550
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -0,0 +1,386 @@
+#[cfg(feature = "master")]
+use std::convert::TryInto;
+
+#[cfg(feature = "master")]
+use gccjit::CType;
+use gccjit::{RValue, Struct, Type};
+use rustc_abi::{AddressSpace, Align, Integer, Size};
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_codegen_ssa::traits::{
+    BaseTypeCodegenMethods, DerivedTypeCodegenMethods, TypeMembershipCodegenMethods,
+};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, ty};
+
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
+        // gcc only supports 1-, 2-, 4-, 8- and 16-byte integers.
+        // FIXME(antoyo): it is misleading to round up to the next power of two here, as
+        // rustc_codegen_ssa sometimes uses 96-bit numbers and the following code will give an
+        // integer of a different size.
+        let bytes = (num_bits / 8).next_power_of_two() as i32;
+        match bytes {
+            1 => self.i8_type,
+            2 => self.i16_type,
+            4 => self.i32_type,
+            8 => self.i64_type,
+            16 => self.i128_type,
+            _ => panic!("unexpected num_bits: {}", num_bits),
+        }
+    }
+
+    pub fn type_void(&self) -> Type<'gcc> {
+        self.context.new_type::<()>()
+    }
+
+    pub fn type_size_t(&self) -> Type<'gcc> {
+        self.context.new_type::<usize>()
+    }
+
+    pub fn type_u8(&self) -> Type<'gcc> {
+        self.u8_type
+    }
+
+    pub fn type_u16(&self) -> Type<'gcc> {
+        self.u16_type
+    }
+
+    pub fn type_u32(&self) -> Type<'gcc> {
+        self.u32_type
+    }
+
+    pub fn type_u64(&self) -> Type<'gcc> {
+        self.u64_type
+    }
+
+    pub fn type_u128(&self) -> Type<'gcc> {
+        self.u128_type
+    }
+
+    pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+        ty.make_pointer()
+    }
+
+    pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+        // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
+        ty.make_pointer()
+    }
+
+    pub fn type_i8p(&self) -> Type<'gcc> {
+        self.type_ptr_to(self.type_i8())
+    }
+
+    pub fn type_i8p_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+        self.type_ptr_to_ext(self.type_i8(), address_space)
+    }
+
+    pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
+        // FIXME(eddyb) We could find a better approximation if ity.align < align.
+        let ity = Integer::approximate_align(self, align);
+        self.type_from_integer(ity)
+    }
+
+    pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
+        self.context.new_vector_type(ty, len)
+    }
+
+    pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
+        match t {
+            ty::FloatTy::F16 => self.type_f16(),
+            ty::FloatTy::F32 => self.type_f32(),
+            ty::FloatTy::F64 => self.type_f64(),
+            ty::FloatTy::F128 => self.type_f128(),
+        }
+    }
+
+    pub fn type_i1(&self) -> Type<'gcc> {
+        self.bool_type
+    }
+
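+    /// Create an anonymous struct type with the given field types, memoized in `struct_types`.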
+    pub fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
+        let types = fields.to_vec();
+        if let Some(typ) = self.struct_types.borrow().get(fields) {
+            return *typ;
+        }
+        let fields: Vec<_> = fields
+            .iter()
+            .enumerate()
+            .map(|(index, field)| {
+                self.context.new_field(None, *field, format!("field{}_TODO", index))
+            })
+            .collect();
+        let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
+        if packed {
+            #[cfg(feature = "master")]
+            typ.set_packed();
+        }
+        self.struct_types.borrow_mut().insert(types, typ);
+        typ
+    }
+}
+
+impl<'gcc, 'tcx> BaseTypeCodegenMethods for CodegenCx<'gcc, 'tcx> {
+    fn type_i8(&self) -> Type<'gcc> {
+        self.i8_type
+    }
+
+    fn type_i16(&self) -> Type<'gcc> {
+        self.i16_type
+    }
+
+    fn type_i32(&self) -> Type<'gcc> {
+        self.i32_type
+    }
+
+    fn type_i64(&self) -> Type<'gcc> {
+        self.i64_type
+    }
+
+    fn type_i128(&self) -> Type<'gcc> {
+        self.i128_type
+    }
+
+    fn type_isize(&self) -> Type<'gcc> {
+        self.isize_type
+    }
+
+    fn type_f16(&self) -> Type<'gcc> {
+        #[cfg(feature = "master")]
+        if self.supports_f16_type {
+            return self.context.new_c_type(CType::Float16);
+        }
+        bug!("unsupported float width 16")
+    }
+
+    fn type_f32(&self) -> Type<'gcc> {
+        #[cfg(feature = "master")]
+        if self.supports_f32_type {
+            return self.context.new_c_type(CType::Float32);
+        }
+        self.float_type
+    }
+
+    fn type_f64(&self) -> Type<'gcc> {
+        #[cfg(feature = "master")]
+        if self.supports_f64_type {
+            return self.context.new_c_type(CType::Float64);
+        }
+        self.double_type
+    }
+
+    fn type_f128(&self) -> Type<'gcc> {
+        #[cfg(feature = "master")]
+        if self.supports_f128_type {
+            return self.context.new_c_type(CType::Float128);
+        }
+        bug!("unsupported float width 128")
+    }
+
+    fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
+        self.context.new_function_pointer_type(None, return_type, params, false)
+    }
+
+    #[cfg(feature = "master")]
+    fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
+        if self.is_int_type_or_bool(typ) {
+            TypeKind::Integer
+        } else if typ.get_pointee().is_some() {
+            TypeKind::Pointer
+        } else if typ.is_vector() {
+            TypeKind::Vector
+        } else if typ.dyncast_array().is_some() {
+            TypeKind::Array
+        } else if typ.is_struct().is_some() {
+            TypeKind::Struct
+        } else if typ.dyncast_function_ptr_type().is_some() {
+            TypeKind::Function
+        } else if typ.is_compatible_with(self.float_type) {
+            TypeKind::Float
+        } else if typ.is_compatible_with(self.double_type) {
+            TypeKind::Double
+        } else if typ.is_floating_point() {
+            match typ.get_size() {
+                2 => TypeKind::Half,
+                4 => TypeKind::Float,
+                8 => TypeKind::Double,
+                16 => TypeKind::FP128,
+                size => unreachable!("Floating-point type of size {}", size),
+            }
+        } else if typ == self.type_void() {
+            TypeKind::Void
+        } else {
+            // TODO(antoyo): support other types.
+            unimplemented!();
+        }
+    }
+
+    #[cfg(not(feature = "master"))]
+    fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
+        if self.is_int_type_or_bool(typ) {
+            TypeKind::Integer
+        } else if typ.is_compatible_with(self.float_type) {
+            TypeKind::Float
+        } else if typ.is_compatible_with(self.double_type) {
+            TypeKind::Double
+        } else if typ.is_vector() {
+            TypeKind::Vector
+        } else if typ.get_pointee().is_some() {
+            TypeKind::Pointer
+        } else if typ.dyncast_array().is_some() {
+            TypeKind::Array
+        } else if typ.is_struct().is_some() {
+            TypeKind::Struct
+        } else if typ.dyncast_function_ptr_type().is_some() {
+            TypeKind::Function
+        } else if typ == self.type_void() {
+            TypeKind::Void
+        } else {
+            // TODO(antoyo): support other types.
+            unimplemented!();
+        }
+    }
+
+    fn type_ptr(&self) -> Type<'gcc> {
+        self.type_ptr_to(self.type_void())
+    }
+
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+        self.type_ptr_to_ext(self.type_void(), address_space)
+    }
+
+    fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
+        if let Some(typ) = ty.dyncast_array() {
+            typ
+        } else if let Some(vector_type) = ty.dyncast_vector() {
+            vector_type.get_element_type()
+        } else if let Some(typ) = ty.get_pointee() {
+            typ
+        } else {
+            unreachable!()
+        }
+    }
+
+    fn vector_length(&self, _ty: Type<'gcc>) -> usize {
+        unimplemented!();
+    }
+
+    #[cfg(feature = "master")]
+    fn float_width(&self, typ: Type<'gcc>) -> usize {
+        if typ.is_floating_point() {
+            (typ.get_size() * u8::BITS).try_into().unwrap()
+        } else {
+            panic!("Cannot get width of float type {:?}", typ);
+        }
+    }
+
+    #[cfg(not(feature = "master"))]
+    fn float_width(&self, typ: Type<'gcc>) -> usize {
+        let f32 = self.context.new_type::<f32>();
+        let f64 = self.context.new_type::<f64>();
+        if typ.is_compatible_with(f32) {
+            32
+        } else if typ.is_compatible_with(f64) {
+            64
+        } else {
+            panic!("Cannot get width of float type {:?}", typ);
+        }
+        // TODO(antoyo): support other sizes.
+    }
+
+    fn int_width(&self, typ: Type<'gcc>) -> u64 {
+        self.gcc_int_width(typ)
+    }
+
+    fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
+        value.get_type()
+    }
+
+    #[cfg_attr(feature = "master", allow(unused_mut))]
+    fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+        #[cfg(not(feature = "master"))]
+        if let Some(struct_type) = ty.is_struct() {
+            if struct_type.get_field_count() == 0 {
+                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
+                // size of usize::MAX in test_binary_search, we work around this by setting the size
+                // to zero for ZSTs.
+                len = 0;
+            }
+        }
+
+        self.context.new_array_type(None, ty, len)
+    }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
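+    /// Build a filler type for `size` bytes of padding: an array of the largest integer unit
+    /// that still fits `align`. `size` must be a multiple of that unit's size.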
+    pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
+        let unit = Integer::approximate_align(self, align);
+        let size = size.bytes();
+        let unit_size = unit.size().bytes();
+        assert_eq!(size % unit_size, 0);
+        self.type_array(self.type_from_integer(unit), size / unit_size)
+    }
+
+    pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) {
+        let fields: Vec<_> = fields
+            .iter()
+            .enumerate()
+            .map(|(index, field)| self.context.new_field(None, *field, format!("field_{}", index)))
+            .collect();
+        typ.set_fields(None, &fields);
+        if packed {
+            #[cfg(feature = "master")]
+            typ.as_type().set_packed();
+        }
+    }
+
+    pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
+        self.context.new_opaque_struct_type(None, name)
+    }
+}
+
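+/// Compute the GCC field types for `layout`, inserting an explicit padding filler before each
+/// field (hence the `1 + field_count * 2` capacity) and, for sized layouts, after the last one.
+/// Also returns whether the resulting struct must be packed.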
+pub fn struct_fields<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+) -> (Vec<Type<'gcc>>, bool) {
+    let field_count = layout.fields.count();
+
+    let mut packed = false;
+    let mut offset = Size::ZERO;
+    let mut prev_effective_align = layout.align.abi;
+    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+    for i in layout.fields.index_by_increasing_offset() {
+        let target_offset = layout.fields.offset(i);
+        let field = layout.field(cx, i);
+        let effective_field_align =
+            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+        packed |= effective_field_align < field.align.abi;
+
+        assert!(target_offset >= offset);
+        let padding = target_offset - offset;
+        let padding_align = prev_effective_align.min(effective_field_align);
+        assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+        result.push(cx.type_padding_filler(padding, padding_align));
+
+        result.push(field.gcc_type(cx));
+        offset = target_offset + field.size;
+        prev_effective_align = effective_field_align;
+    }
+    if layout.is_sized() && field_count > 0 {
+        if offset > layout.size {
+            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+        }
+        let padding = layout.size - offset;
+        let padding_align = prev_effective_align;
+        assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+        result.push(cx.type_padding_filler(padding, padding_align));
+        assert_eq!(result.len(), 1 + field_count * 2);
+    }
+
+    (result, packed)
+}
+
+impl<'gcc, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {}
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
new file mode 100644
index 00000000000..ae98b3d0b56
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -0,0 +1,381 @@
+use std::fmt::Write;
+
+use gccjit::{Struct, Type};
+use rustc_abi as abi;
+use rustc_abi::Primitive::*;
+use rustc_abi::{
+    BackendRepr, FieldsShape, Integer, PointeeInfo, Reg, Size, TyAbiInterface, Variants,
+};
+use rustc_codegen_ssa::traits::{
+    BaseTypeCodegenMethods, DerivedTypeCodegenMethods, LayoutTypeCodegenMethods,
+};
+use rustc_middle::bug;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt};
+use rustc_target::callconv::{CastTarget, FnAbi};
+
+use crate::abi::{FnAbiGcc, FnAbiGccExt, GccType};
+use crate::context::CodegenCx;
+use crate::type_::struct_fields;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
+        use Integer::*;
+        match i {
+            I8 => self.type_u8(),
+            I16 => self.type_u16(),
+            I32 => self.type_u32(),
+            I64 => self.type_u64(),
+            I128 => self.type_u128(),
+        }
+    }
+
+    #[cfg(feature = "master")]
+    pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
+        match t {
+            ty::IntTy::Isize => self.type_isize(),
+            ty::IntTy::I8 => self.type_i8(),
+            ty::IntTy::I16 => self.type_i16(),
+            ty::IntTy::I32 => self.type_i32(),
+            ty::IntTy::I64 => self.type_i64(),
+            ty::IntTy::I128 => self.type_i128(),
+        }
+    }
+
+    #[cfg(feature = "master")]
+    pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
+        match t {
+            ty::UintTy::Usize => self.type_isize(),
+            ty::UintTy::U8 => self.type_i8(),
+            ty::UintTy::U16 => self.type_i16(),
+            ty::UintTy::U32 => self.type_i32(),
+            ty::UintTy::U64 => self.type_i64(),
+            ty::UintTy::U128 => self.type_i128(),
+        }
+    }
+}
+
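+/// Build the GCC type for a layout that is not in the cache. For named structs with arbitrary
+/// field shapes, only an opaque struct is created here; its body is filled in later through
+/// `defer` so that recursive types can refer to the already-cached opaque type.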
+fn uncached_gcc_type<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>,
+) -> Type<'gcc> {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::SimdVector { ref element, count } => {
+            let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
+            let element =
+                // NOTE: gcc doesn't allow pointer types in vectors.
+                if element.get_pointee().is_some() {
+                    cx.usize_type
+                }
+                else {
+                    element
+                };
+            return cx.context.new_vector_type(element, count);
+        }
+        BackendRepr::ScalarPair(..) => {
+            return cx.type_struct(
+                &[
+                    layout.scalar_pair_element_gcc_type(cx, 0),
+                    layout.scalar_pair_element_gcc_type(cx, 1),
+                ],
+                false,
+            );
+        }
+        BackendRepr::Memory { .. } => {}
+    }
+
+    let name = match *layout.ty.kind() {
+        // FIXME(eddyb) producing readable type names for trait objects can result
+        // in problematically distinct types due to HRTB and subtyping (see #47638).
+        // ty::Dynamic(..) |
+        ty::Adt(..)
+        | ty::Closure(..)
+        | ty::CoroutineClosure(..)
+        | ty::Foreign(..)
+        | ty::Coroutine(..)
+        | ty::Str
+            if !cx.sess().fewer_names() =>
+        {
+            let mut name = with_no_trimmed_paths!(layout.ty.to_string());
+            if let (&ty::Adt(def, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                if def.is_enum() && !def.variants().is_empty() {
+                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
+                }
+            }
+            if let (&ty::Coroutine(_, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap();
+            }
+            Some(name)
+        }
+        ty::Adt(..) => {
+            // If `Some` is returned, a named struct is created in LLVM. Name collisions are
+            // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names, that
+            // can improve perf.
+            // FIXME(antoyo): I don't think that's true for libgccjit.
+            Some(String::new())
+        }
+        _ => None,
+    };
+
+    match layout.fields {
+        FieldsShape::Primitive | FieldsShape::Union(_) => {
+            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+            let packed = false;
+            match name {
+                None => cx.type_struct(&[fill], packed),
+                Some(ref name) => {
+                    let gcc_type = cx.type_named_struct(name);
+                    cx.set_struct_body(gcc_type, &[fill], packed);
+                    gcc_type.as_type()
+                }
+            }
+        }
+        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx), count),
+        FieldsShape::Arbitrary { .. } => match name {
+            None => {
+                let (gcc_fields, packed) = struct_fields(cx, layout);
+                cx.type_struct(&gcc_fields, packed)
+            }
+            Some(ref name) => {
+                let gcc_type = cx.type_named_struct(name);
+                *defer = Some((gcc_type, layout));
+                gcc_type.as_type()
+            }
+        },
+    }
+}
+
+pub trait LayoutGccExt<'tcx> {
+    fn is_gcc_immediate(&self) -> bool;
+    fn is_gcc_scalar_pair(&self) -> bool;
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn scalar_gcc_type_at<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        scalar: &abi::Scalar,
+        offset: Size,
+    ) -> Type<'gcc>;
+    fn scalar_pair_element_gcc_type<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        index: usize,
+    ) -> Type<'gcc>;
+    fn pointee_info_at<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        offset: Size,
+    ) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
+    fn is_gcc_immediate(&self) -> bool {
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
+        }
+    }
+
+    fn is_gcc_scalar_pair(&self) -> bool {
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Scalar(_)
+            | BackendRepr::SimdVector { .. }
+            | BackendRepr::Memory { .. } => false,
+        }
+    }
+
+    /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+    /// The pointee type of the pointer in `PlaceRef` is always this type.
+    /// For sized types, it is also the right GCC type for an `alloca`
+    /// containing a value of that type, and most immediates (except `bool`).
+    /// Unsized types, however, are represented by a "minimal unit", e.g.
+    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+    /// If the type is an unsized struct, the regular layout is generated,
+    /// with the innermost trailing unsized field using the "minimal unit"
+    /// of that field's type - this is useful for taking the address of
+    /// that field and ensuring the struct has the right alignment.
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        use rustc_middle::ty::layout::FnAbiOf;
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
+        if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
+            // Use a different cache for scalars because pointers to DSTs
+            // can be either wide or thin (data pointers of wide pointers).
+            if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
+                return ty;
+            }
+            let ty = match *self.ty.kind() {
+                // NOTE: we cannot remove this match like in the LLVM codegen because the call
+                // to fn_ptr_backend_type handles the on-stack attribute.
+                // TODO(antoyo): find a less hackish way to handle the on-stack attribute.
+                ty::FnPtr(sig_tys, hdr) => cx
+                    .fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig_tys.with(hdr), ty::List::empty())),
+                _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+            };
+            cx.scalar_types.borrow_mut().insert(self.ty, ty);
+            return ty;
+        }
+
+        // Check the cache.
+        let variant_index = match self.variants {
+            Variants::Single { index } => Some(index),
+            _ => None,
+        };
+        let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
+        if let Some(ty) = cached_type {
+            return ty;
+        }
+
+        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+        // Make sure lifetimes are erased, to avoid generating distinct GCC
+        // types for Rust types that only differ in the choice of lifetimes.
+        let normal_ty = cx.tcx.erase_regions(self.ty);
+
+        let mut defer = None;
+        let ty = if self.ty != normal_ty {
+            let mut layout = cx.layout_of(normal_ty);
+            if let Some(v) = variant_index {
+                layout = layout.for_variant(cx, v);
+            }
+            layout.gcc_type(cx)
+        } else {
+            uncached_gcc_type(cx, *self, &mut defer)
+        };
+
+        cx.types.borrow_mut().insert((self.ty, variant_index), ty);
+
+        if let Some((deferred_ty, layout)) = defer {
+            let (fields, packed) = struct_fields(cx, layout);
+            cx.set_struct_body(deferred_ty, &fields, packed);
+        }
+
+        ty
+    }
+
+    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
+            if scalar.is_bool() {
+                return cx.type_i1();
+            }
+        }
+        self.gcc_type(cx)
+    }
+
+    fn scalar_gcc_type_at<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        scalar: &abi::Scalar,
+        offset: Size,
+    ) -> Type<'gcc> {
+        match scalar.primitive() {
+            Int(i, true) => cx.type_from_integer(i),
+            Int(i, false) => cx.type_from_unsigned_integer(i),
+            Float(f) => cx.type_from_float(f),
+            Pointer(address_space) => {
+                // If we know the alignment, pick something better than i8.
+                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                    cx.type_pointee_for_align(pointee.align)
+                } else {
+                    cx.type_i8()
+                };
+                cx.type_ptr_to_ext(pointee, address_space)
+            }
+        }
+    }
+
+    fn scalar_pair_element_gcc_type<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        index: usize,
+    ) -> Type<'gcc> {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
+        let (a, b) = match self.backend_repr {
+            BackendRepr::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_gcc_type` would when
+        // dealing with an immediate pair.  This means that `(bool, bool)` is
+        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+        // immediate, just like `bool` is typically `i8` in memory and only `i1`
+        // when immediate.  We need to load/store `bool` as `i8` to avoid
+        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+        // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
+        if scalar.is_bool() {
+            return cx.type_i1();
+        }
+
+        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
+        self.scalar_gcc_type_at(cx, scalar, offset)
+    }
+
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+            return pointee;
+        }
+
+        let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
+
+        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+        result
+    }
+}
+
+impl<'gcc, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+        layout.gcc_type(self)
+    }
+
+    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+        layout.immediate_gcc_type(self)
+    }
+
+    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_gcc_immediate()
+    }
+
+    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_gcc_scalar_pair()
+    }
+
+    fn scalar_pair_element_backend_type(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        index: usize,
+        _immediate: bool,
+    ) -> Type<'gcc> {
+        layout.scalar_pair_element_gcc_type(self, index)
+    }
+
+    fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
+        ty.gcc_type(self)
+    }
+
+    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        fn_abi.ptr_to_gcc_type(self)
+    }
+
+    fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
+        unimplemented!();
+    }
+
+    fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
+        let FnAbiGcc { return_type, arguments_type, is_c_variadic, .. } = fn_abi.gcc_type(self);
+        self.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic)
+    }
+}