author     WANG Rui <wangrui@loongson.cn>  2025-07-19 10:36:45 +0800
committer  WANG Rui <wangrui@loongson.cn>  2025-07-25 09:31:58 +0800
commit     49ea48d952db3e9825508699e81d3d8af837b09e (patch)
tree       0ff15b0908540eed4d71ca4c08166bd0ffbe4a4d
parent     9b7d31c851cabc2e6e541d3cf146787d597a9166 (diff)
loongarch: Use unified data types for SIMD intrinsics
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs   4441
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs         157
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs     4321
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs          159
-rw-r--r--  library/stdarch/crates/stdarch-gen-loongarch/src/main.rs               224
5 files changed, 4780 insertions, 4522 deletions
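
The change itself is mechanical: the extern declarations binding the LLVM intrinsics now take and return double-underscore-prefixed internal vector types (__v32i8, __v16i16, and so on) defined in each types.rs, and transmute is imported into generated.rs, presumably so the generated public wrappers can bridge between those internal types and the unified public SIMD types. Below is a minimal, self-contained sketch of that bridging pattern; the public type name M256i, the scalar shift emulation, and the #[repr(C)] layouts are illustrative stand-ins, not the crate's actual repr(simd) definitions.

    use std::mem::transmute;

    // Layout-only stand-in for the internal __v32i8 type declared in types.rs.
    #[repr(C)]
    #[derive(Clone, Copy)]
    struct V32i8([i8; 32]);

    // Hypothetical unified public 256-bit vector type (name chosen for illustration).
    #[repr(C)]
    #[derive(Clone, Copy)]
    pub struct M256i(pub [i64; 4]);

    // Stand-in for the LLVM intrinsic binding __lasx_xvsll_b: per-lane left shift,
    // emulated with scalar code only so the sketch stays runnable.
    unsafe fn __lasx_xvsll_b(a: V32i8, b: V32i8) -> V32i8 {
        let mut out = a;
        for i in 0..32 {
            out.0[i] = a.0[i].wrapping_shl((b.0[i] & 0x7) as u32);
        }
        out
    }

    // A generated wrapper would transmute between the public unified type and the
    // internal type expected by the intrinsic, which is why generated.rs now
    // imports crate::mem::transmute.
    pub fn lasx_xvsll_b(a: M256i, b: M256i) -> M256i {
        // Both types are 32 bytes wide with compatible plain-data layouts.
        unsafe { transmute(__lasx_xvsll_b(transmute(a), transmute(b))) }
    }

    fn main() {
        let a = M256i([0x0101_0101_0101_0101; 4]); // every byte lane holds 1
        let b = M256i([0x0303_0303_0303_0303; 4]); // shift each lane left by 3
        let c = lasx_xvsll_b(a, b);
        assert_eq!(c.0, [0x0808_0808_0808_0808; 4]); // 1 << 3 == 8 in every lane
    }
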
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs
index 4361acdc1fc..cda0ebec677 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs
@@ -6,7058 +6,7059 @@
 // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasx.spec
 // ```
 
+use crate::mem::transmute;
 use super::types::*;
 
 #[allow(improper_ctypes)]
 unsafe extern "unadjusted" {
     #[link_name = "llvm.loongarch.lasx.xvsll.b"]
-    fn __lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsll_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsll.h"]
-    fn __lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsll_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsll.w"]
-    fn __lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsll_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsll.d"]
-    fn __lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsll_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslli.b"]
-    fn __lasx_xvslli_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvslli_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslli.h"]
-    fn __lasx_xvslli_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvslli_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslli.w"]
-    fn __lasx_xvslli_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvslli_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslli.d"]
-    fn __lasx_xvslli_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvslli_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsra.b"]
-    fn __lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsra_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsra.h"]
-    fn __lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsra_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsra.w"]
-    fn __lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsra_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsra.d"]
-    fn __lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsra_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrai.b"]
-    fn __lasx_xvsrai_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrai_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrai.h"]
-    fn __lasx_xvsrai_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrai_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrai.w"]
-    fn __lasx_xvsrai_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrai_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrai.d"]
-    fn __lasx_xvsrai_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrai_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrar.b"]
-    fn __lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsrar_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrar.h"]
-    fn __lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsrar_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrar.w"]
-    fn __lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsrar_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrar.d"]
-    fn __lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsrar_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrari.b"]
-    fn __lasx_xvsrari_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrari_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrari.h"]
-    fn __lasx_xvsrari_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrari_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrari.w"]
-    fn __lasx_xvsrari_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrari_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrari.d"]
-    fn __lasx_xvsrari_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrari_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrl.b"]
-    fn __lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsrl_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrl.h"]
-    fn __lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsrl_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrl.w"]
-    fn __lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsrl_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrl.d"]
-    fn __lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsrl_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrli.b"]
-    fn __lasx_xvsrli_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrli_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrli.h"]
-    fn __lasx_xvsrli_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrli_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrli.w"]
-    fn __lasx_xvsrli_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrli_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrli.d"]
-    fn __lasx_xvsrli_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrli_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.b"]
-    fn __lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsrlr_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.h"]
-    fn __lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsrlr_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.w"]
-    fn __lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsrlr_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.d"]
-    fn __lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsrlr_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.b"]
-    fn __lasx_xvsrlri_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrlri_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.h"]
-    fn __lasx_xvsrlri_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrlri_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.w"]
-    fn __lasx_xvsrlri_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrlri_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.d"]
-    fn __lasx_xvsrlri_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrlri_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.b"]
-    fn __lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvbitclr_b(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.h"]
-    fn __lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvbitclr_h(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.w"]
-    fn __lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvbitclr_w(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.d"]
-    fn __lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvbitclr_d(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.b"]
-    fn __lasx_xvbitclri_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvbitclri_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.h"]
-    fn __lasx_xvbitclri_h(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvbitclri_h(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.w"]
-    fn __lasx_xvbitclri_w(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvbitclri_w(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.d"]
-    fn __lasx_xvbitclri_d(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvbitclri_d(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitset.b"]
-    fn __lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvbitset_b(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitset.h"]
-    fn __lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvbitset_h(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitset.w"]
-    fn __lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvbitset_w(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitset.d"]
-    fn __lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvbitset_d(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.b"]
-    fn __lasx_xvbitseti_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvbitseti_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.h"]
-    fn __lasx_xvbitseti_h(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvbitseti_h(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.w"]
-    fn __lasx_xvbitseti_w(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvbitseti_w(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.d"]
-    fn __lasx_xvbitseti_d(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvbitseti_d(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.b"]
-    fn __lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvbitrev_b(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.h"]
-    fn __lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvbitrev_h(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.w"]
-    fn __lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvbitrev_w(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.d"]
-    fn __lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvbitrev_d(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.b"]
-    fn __lasx_xvbitrevi_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvbitrevi_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.h"]
-    fn __lasx_xvbitrevi_h(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvbitrevi_h(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.w"]
-    fn __lasx_xvbitrevi_w(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvbitrevi_w(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.d"]
-    fn __lasx_xvbitrevi_d(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvbitrevi_d(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvadd.b"]
-    fn __lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvadd_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvadd.h"]
-    fn __lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvadd_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvadd.w"]
-    fn __lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvadd_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvadd.d"]
-    fn __lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvadd_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddi.bu"]
-    fn __lasx_xvaddi_bu(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvaddi_bu(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvaddi.hu"]
-    fn __lasx_xvaddi_hu(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvaddi_hu(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddi.wu"]
-    fn __lasx_xvaddi_wu(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvaddi_wu(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddi.du"]
-    fn __lasx_xvaddi_du(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvaddi_du(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsub.b"]
-    fn __lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsub_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsub.h"]
-    fn __lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsub_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsub.w"]
-    fn __lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsub_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsub.d"]
-    fn __lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsub_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubi.bu"]
-    fn __lasx_xvsubi_bu(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsubi_bu(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsubi.hu"]
-    fn __lasx_xvsubi_hu(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsubi_hu(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubi.wu"]
-    fn __lasx_xvsubi_wu(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsubi_wu(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubi.du"]
-    fn __lasx_xvsubi_du(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsubi_du(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmax.b"]
-    fn __lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmax_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmax.h"]
-    fn __lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmax_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmax.w"]
-    fn __lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmax_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmax.d"]
-    fn __lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmax_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.b"]
-    fn __lasx_xvmaxi_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvmaxi_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.h"]
-    fn __lasx_xvmaxi_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvmaxi_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.w"]
-    fn __lasx_xvmaxi_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvmaxi_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.d"]
-    fn __lasx_xvmaxi_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvmaxi_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmax.bu"]
-    fn __lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmax_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmax.hu"]
-    fn __lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmax_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmax.wu"]
-    fn __lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmax_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmax.du"]
-    fn __lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmax_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.bu"]
-    fn __lasx_xvmaxi_bu(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvmaxi_bu(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.hu"]
-    fn __lasx_xvmaxi_hu(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvmaxi_hu(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.wu"]
-    fn __lasx_xvmaxi_wu(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvmaxi_wu(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.du"]
-    fn __lasx_xvmaxi_du(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvmaxi_du(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmin.b"]
-    fn __lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmin_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmin.h"]
-    fn __lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmin_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmin.w"]
-    fn __lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmin_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmin.d"]
-    fn __lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmin_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmini.b"]
-    fn __lasx_xvmini_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvmini_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmini.h"]
-    fn __lasx_xvmini_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvmini_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmini.w"]
-    fn __lasx_xvmini_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvmini_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmini.d"]
-    fn __lasx_xvmini_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvmini_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmin.bu"]
-    fn __lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmin_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmin.hu"]
-    fn __lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmin_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmin.wu"]
-    fn __lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmin_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmin.du"]
-    fn __lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmin_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmini.bu"]
-    fn __lasx_xvmini_bu(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvmini_bu(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmini.hu"]
-    fn __lasx_xvmini_hu(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvmini_hu(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmini.wu"]
-    fn __lasx_xvmini_wu(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvmini_wu(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmini.du"]
-    fn __lasx_xvmini_du(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvmini_du(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvseq.b"]
-    fn __lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvseq_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvseq.h"]
-    fn __lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvseq_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvseq.w"]
-    fn __lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvseq_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvseq.d"]
-    fn __lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvseq_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvseqi.b"]
-    fn __lasx_xvseqi_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvseqi_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvseqi.h"]
-    fn __lasx_xvseqi_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvseqi_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvseqi.w"]
-    fn __lasx_xvseqi_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvseqi_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvseqi.d"]
-    fn __lasx_xvseqi_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvseqi_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslt.b"]
-    fn __lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvslt_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslt.h"]
-    fn __lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvslt_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslt.w"]
-    fn __lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvslt_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslt.d"]
-    fn __lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvslt_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslti.b"]
-    fn __lasx_xvslti_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvslti_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslti.h"]
-    fn __lasx_xvslti_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvslti_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslti.w"]
-    fn __lasx_xvslti_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvslti_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslti.d"]
-    fn __lasx_xvslti_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvslti_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslt.bu"]
-    fn __lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8;
+    fn __lasx_xvslt_bu(a: __v32u8, b: __v32u8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslt.hu"]
-    fn __lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16;
+    fn __lasx_xvslt_hu(a: __v16u16, b: __v16u16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslt.wu"]
-    fn __lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32;
+    fn __lasx_xvslt_wu(a: __v8u32, b: __v8u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslt.du"]
-    fn __lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvslt_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslti.bu"]
-    fn __lasx_xvslti_bu(a: v32u8, b: u32) -> v32i8;
+    fn __lasx_xvslti_bu(a: __v32u8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslti.hu"]
-    fn __lasx_xvslti_hu(a: v16u16, b: u32) -> v16i16;
+    fn __lasx_xvslti_hu(a: __v16u16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslti.wu"]
-    fn __lasx_xvslti_wu(a: v8u32, b: u32) -> v8i32;
+    fn __lasx_xvslti_wu(a: __v8u32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslti.du"]
-    fn __lasx_xvslti_du(a: v4u64, b: u32) -> v4i64;
+    fn __lasx_xvslti_du(a: __v4u64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsle.b"]
-    fn __lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsle_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsle.h"]
-    fn __lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsle_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsle.w"]
-    fn __lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsle_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsle.d"]
-    fn __lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsle_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslei.b"]
-    fn __lasx_xvslei_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvslei_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslei.h"]
-    fn __lasx_xvslei_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvslei_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslei.w"]
-    fn __lasx_xvslei_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvslei_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslei.d"]
-    fn __lasx_xvslei_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvslei_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsle.bu"]
-    fn __lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8;
+    fn __lasx_xvsle_bu(a: __v32u8, b: __v32u8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsle.hu"]
-    fn __lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16;
+    fn __lasx_xvsle_hu(a: __v16u16, b: __v16u16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsle.wu"]
-    fn __lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32;
+    fn __lasx_xvsle_wu(a: __v8u32, b: __v8u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsle.du"]
-    fn __lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvsle_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslei.bu"]
-    fn __lasx_xvslei_bu(a: v32u8, b: u32) -> v32i8;
+    fn __lasx_xvslei_bu(a: __v32u8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslei.hu"]
-    fn __lasx_xvslei_hu(a: v16u16, b: u32) -> v16i16;
+    fn __lasx_xvslei_hu(a: __v16u16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslei.wu"]
-    fn __lasx_xvslei_wu(a: v8u32, b: u32) -> v8i32;
+    fn __lasx_xvslei_wu(a: __v8u32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslei.du"]
-    fn __lasx_xvslei_du(a: v4u64, b: u32) -> v4i64;
+    fn __lasx_xvslei_du(a: __v4u64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsat.b"]
-    fn __lasx_xvsat_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsat_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsat.h"]
-    fn __lasx_xvsat_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsat_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsat.w"]
-    fn __lasx_xvsat_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsat_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsat.d"]
-    fn __lasx_xvsat_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsat_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsat.bu"]
-    fn __lasx_xvsat_bu(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvsat_bu(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvsat.hu"]
-    fn __lasx_xvsat_hu(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvsat_hu(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvsat.wu"]
-    fn __lasx_xvsat_wu(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvsat_wu(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsat.du"]
-    fn __lasx_xvsat_du(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvsat_du(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvadda.b"]
-    fn __lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvadda_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvadda.h"]
-    fn __lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvadda_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvadda.w"]
-    fn __lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvadda_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvadda.d"]
-    fn __lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvadda_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsadd.b"]
-    fn __lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsadd_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsadd.h"]
-    fn __lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsadd_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsadd.w"]
-    fn __lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsadd_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsadd.d"]
-    fn __lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsadd_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsadd.bu"]
-    fn __lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvsadd_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvsadd.hu"]
-    fn __lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvsadd_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvsadd.wu"]
-    fn __lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvsadd_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsadd.du"]
-    fn __lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvsadd_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvavg.b"]
-    fn __lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvavg_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvavg.h"]
-    fn __lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvavg_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvavg.w"]
-    fn __lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvavg_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvavg.d"]
-    fn __lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvavg_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvavg.bu"]
-    fn __lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvavg_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvavg.hu"]
-    fn __lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvavg_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvavg.wu"]
-    fn __lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvavg_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvavg.du"]
-    fn __lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvavg_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvavgr.b"]
-    fn __lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvavgr_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvavgr.h"]
-    fn __lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvavgr_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvavgr.w"]
-    fn __lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvavgr_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvavgr.d"]
-    fn __lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvavgr_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvavgr.bu"]
-    fn __lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvavgr_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvavgr.hu"]
-    fn __lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvavgr_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvavgr.wu"]
-    fn __lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvavgr_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvavgr.du"]
-    fn __lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvavgr_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvssub.b"]
-    fn __lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvssub_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssub.h"]
-    fn __lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvssub_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssub.w"]
-    fn __lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvssub_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssub.d"]
-    fn __lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvssub_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssub.bu"]
-    fn __lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvssub_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssub.hu"]
-    fn __lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvssub_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssub.wu"]
-    fn __lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvssub_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssub.du"]
-    fn __lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvssub_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvabsd.b"]
-    fn __lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvabsd_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvabsd.h"]
-    fn __lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvabsd_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvabsd.w"]
-    fn __lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvabsd_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvabsd.d"]
-    fn __lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvabsd_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvabsd.bu"]
-    fn __lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvabsd_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvabsd.hu"]
-    fn __lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvabsd_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvabsd.wu"]
-    fn __lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvabsd_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvabsd.du"]
-    fn __lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvabsd_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmul.b"]
-    fn __lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmul_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmul.h"]
-    fn __lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmul_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmul.w"]
-    fn __lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmul_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmul.d"]
-    fn __lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmul_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmadd.b"]
-    fn __lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvmadd_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmadd.h"]
-    fn __lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvmadd_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmadd.w"]
-    fn __lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32;
+    fn __lasx_xvmadd_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmadd.d"]
-    fn __lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmadd_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmsub.b"]
-    fn __lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvmsub_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmsub.h"]
-    fn __lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvmsub_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmsub.w"]
-    fn __lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32;
+    fn __lasx_xvmsub_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmsub.d"]
-    fn __lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmsub_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvdiv.b"]
-    fn __lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvdiv_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvdiv.h"]
-    fn __lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvdiv_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvdiv.w"]
-    fn __lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvdiv_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvdiv.d"]
-    fn __lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvdiv_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvdiv.bu"]
-    fn __lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvdiv_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvdiv.hu"]
-    fn __lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvdiv_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvdiv.wu"]
-    fn __lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvdiv_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvdiv.du"]
-    fn __lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvdiv_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.h.b"]
-    fn __lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvhaddw_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.w.h"]
-    fn __lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvhaddw_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.d.w"]
-    fn __lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvhaddw_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.hu.bu"]
-    fn __lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16;
+    fn __lasx_xvhaddw_hu_bu(a: __v32u8, b: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.wu.hu"]
-    fn __lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32;
+    fn __lasx_xvhaddw_wu_hu(a: __v16u16, b: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.du.wu"]
-    fn __lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64;
+    fn __lasx_xvhaddw_du_wu(a: __v8u32, b: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.h.b"]
-    fn __lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvhsubw_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.w.h"]
-    fn __lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvhsubw_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.d.w"]
-    fn __lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvhsubw_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.hu.bu"]
-    fn __lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvhsubw_hu_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.wu.hu"]
-    fn __lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvhsubw_wu_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.du.wu"]
-    fn __lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvhsubw_du_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmod.b"]
-    fn __lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmod_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmod.h"]
-    fn __lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmod_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmod.w"]
-    fn __lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmod_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmod.d"]
-    fn __lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmod_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmod.bu"]
-    fn __lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmod_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmod.hu"]
-    fn __lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmod_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmod.wu"]
-    fn __lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmod_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmod.du"]
-    fn __lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmod_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.b"]
-    fn __lasx_xvrepl128vei_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvrepl128vei_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.h"]
-    fn __lasx_xvrepl128vei_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvrepl128vei_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.w"]
-    fn __lasx_xvrepl128vei_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvrepl128vei_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.d"]
-    fn __lasx_xvrepl128vei_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvrepl128vei_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickev.b"]
-    fn __lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpickev_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpickev.h"]
-    fn __lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpickev_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpickev.w"]
-    fn __lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpickev_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickev.d"]
-    fn __lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpickev_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickod.b"]
-    fn __lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpickod_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpickod.h"]
-    fn __lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpickod_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpickod.w"]
-    fn __lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpickod_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickod.d"]
-    fn __lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpickod_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvilvh.b"]
-    fn __lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvilvh_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvilvh.h"]
-    fn __lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvilvh_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvilvh.w"]
-    fn __lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvilvh_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvilvh.d"]
-    fn __lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvilvh_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvilvl.b"]
-    fn __lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvilvl_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvilvl.h"]
-    fn __lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvilvl_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvilvl.w"]
-    fn __lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvilvl_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvilvl.d"]
-    fn __lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvilvl_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpackev.b"]
-    fn __lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpackev_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpackev.h"]
-    fn __lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpackev_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpackev.w"]
-    fn __lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpackev_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpackev.d"]
-    fn __lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpackev_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpackod.b"]
-    fn __lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpackod_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpackod.h"]
-    fn __lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpackod_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpackod.w"]
-    fn __lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpackod_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpackod.d"]
-    fn __lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpackod_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvshuf.b"]
-    fn __lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvshuf_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvshuf.h"]
-    fn __lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvshuf_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvshuf.w"]
-    fn __lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32;
+    fn __lasx_xvshuf_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvshuf.d"]
-    fn __lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvshuf_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvand.v"]
-    fn __lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvand_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvandi.b"]
-    fn __lasx_xvandi_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvandi_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvor.v"]
-    fn __lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvor_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvori.b"]
-    fn __lasx_xvori_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvori_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvnor.v"]
-    fn __lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvnor_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvnori.b"]
-    fn __lasx_xvnori_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvnori_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvxor.v"]
-    fn __lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvxor_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvxori.b"]
-    fn __lasx_xvxori_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvxori_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitsel.v"]
-    fn __lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8;
+    fn __lasx_xvbitsel_v(a: __v32u8, b: __v32u8, c: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitseli.b"]
-    fn __lasx_xvbitseli_b(a: v32u8, b: v32u8, c: u32) -> v32u8;
+    fn __lasx_xvbitseli_b(a: __v32u8, b: __v32u8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.b"]
-    fn __lasx_xvshuf4i_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvshuf4i_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.h"]
-    fn __lasx_xvshuf4i_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvshuf4i_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.w"]
-    fn __lasx_xvshuf4i_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvshuf4i_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.b"]
-    fn __lasx_xvreplgr2vr_b(a: i32) -> v32i8;
+    fn __lasx_xvreplgr2vr_b(a: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.h"]
-    fn __lasx_xvreplgr2vr_h(a: i32) -> v16i16;
+    fn __lasx_xvreplgr2vr_h(a: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.w"]
-    fn __lasx_xvreplgr2vr_w(a: i32) -> v8i32;
+    fn __lasx_xvreplgr2vr_w(a: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.d"]
-    fn __lasx_xvreplgr2vr_d(a: i64) -> v4i64;
+    fn __lasx_xvreplgr2vr_d(a: i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.b"]
-    fn __lasx_xvpcnt_b(a: v32i8) -> v32i8;
+    fn __lasx_xvpcnt_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.h"]
-    fn __lasx_xvpcnt_h(a: v16i16) -> v16i16;
+    fn __lasx_xvpcnt_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.w"]
-    fn __lasx_xvpcnt_w(a: v8i32) -> v8i32;
+    fn __lasx_xvpcnt_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.d"]
-    fn __lasx_xvpcnt_d(a: v4i64) -> v4i64;
+    fn __lasx_xvpcnt_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvclo.b"]
-    fn __lasx_xvclo_b(a: v32i8) -> v32i8;
+    fn __lasx_xvclo_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvclo.h"]
-    fn __lasx_xvclo_h(a: v16i16) -> v16i16;
+    fn __lasx_xvclo_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvclo.w"]
-    fn __lasx_xvclo_w(a: v8i32) -> v8i32;
+    fn __lasx_xvclo_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvclo.d"]
-    fn __lasx_xvclo_d(a: v4i64) -> v4i64;
+    fn __lasx_xvclo_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvclz.b"]
-    fn __lasx_xvclz_b(a: v32i8) -> v32i8;
+    fn __lasx_xvclz_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvclz.h"]
-    fn __lasx_xvclz_h(a: v16i16) -> v16i16;
+    fn __lasx_xvclz_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvclz.w"]
-    fn __lasx_xvclz_w(a: v8i32) -> v8i32;
+    fn __lasx_xvclz_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvclz.d"]
-    fn __lasx_xvclz_d(a: v4i64) -> v4i64;
+    fn __lasx_xvclz_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfadd.s"]
-    fn __lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfadd_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfadd.d"]
-    fn __lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfadd_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfsub.s"]
-    fn __lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfsub_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfsub.d"]
-    fn __lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfsub_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmul.s"]
-    fn __lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmul_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmul.d"]
-    fn __lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmul_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfdiv.s"]
-    fn __lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfdiv_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfdiv.d"]
-    fn __lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfdiv_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfcvt.h.s"]
-    fn __lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16;
+    fn __lasx_xvfcvt_h_s(a: __v8f32, b: __v8f32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvfcvt.s.d"]
-    fn __lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32;
+    fn __lasx_xvfcvt_s_d(a: __v4f64, b: __v4f64) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmin.s"]
-    fn __lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmin_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmin.d"]
-    fn __lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmin_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmina.s"]
-    fn __lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmina_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmina.d"]
-    fn __lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmina_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmax.s"]
-    fn __lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmax_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmax.d"]
-    fn __lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmax_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmaxa.s"]
-    fn __lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmaxa_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmaxa.d"]
-    fn __lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmaxa_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfclass.s"]
-    fn __lasx_xvfclass_s(a: v8f32) -> v8i32;
+    fn __lasx_xvfclass_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfclass.d"]
-    fn __lasx_xvfclass_d(a: v4f64) -> v4i64;
+    fn __lasx_xvfclass_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfsqrt.s"]
-    fn __lasx_xvfsqrt_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfsqrt_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfsqrt.d"]
-    fn __lasx_xvfsqrt_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfsqrt_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrecip.s"]
-    fn __lasx_xvfrecip_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrecip_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrecip.d"]
-    fn __lasx_xvfrecip_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrecip_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrecipe.s"]
-    fn __lasx_xvfrecipe_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrecipe_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrecipe.d"]
-    fn __lasx_xvfrecipe_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrecipe_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrte.s"]
-    fn __lasx_xvfrsqrte_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrsqrte_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrte.d"]
-    fn __lasx_xvfrsqrte_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrsqrte_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrint.s"]
-    fn __lasx_xvfrint_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrint_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrint.d"]
-    fn __lasx_xvfrint_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrint_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrt.s"]
-    fn __lasx_xvfrsqrt_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrsqrt_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrt.d"]
-    fn __lasx_xvfrsqrt_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrsqrt_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvflogb.s"]
-    fn __lasx_xvflogb_s(a: v8f32) -> v8f32;
+    fn __lasx_xvflogb_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvflogb.d"]
-    fn __lasx_xvflogb_d(a: v4f64) -> v4f64;
+    fn __lasx_xvflogb_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfcvth.s.h"]
-    fn __lasx_xvfcvth_s_h(a: v16i16) -> v8f32;
+    fn __lasx_xvfcvth_s_h(a: __v16i16) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfcvth.d.s"]
-    fn __lasx_xvfcvth_d_s(a: v8f32) -> v4f64;
+    fn __lasx_xvfcvth_d_s(a: __v8f32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfcvtl.s.h"]
-    fn __lasx_xvfcvtl_s_h(a: v16i16) -> v8f32;
+    fn __lasx_xvfcvtl_s_h(a: __v16i16) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfcvtl.d.s"]
-    fn __lasx_xvfcvtl_d_s(a: v8f32) -> v4f64;
+    fn __lasx_xvfcvtl_d_s(a: __v8f32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvftint.w.s"]
-    fn __lasx_xvftint_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftint_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftint.l.d"]
-    fn __lasx_xvftint_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftint_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftint.wu.s"]
-    fn __lasx_xvftint_wu_s(a: v8f32) -> v8u32;
+    fn __lasx_xvftint_wu_s(a: __v8f32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvftint.lu.d"]
-    fn __lasx_xvftint_lu_d(a: v4f64) -> v4u64;
+    fn __lasx_xvftint_lu_d(a: __v4f64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.w.s"]
-    fn __lasx_xvftintrz_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrz_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.l.d"]
-    fn __lasx_xvftintrz_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrz_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.wu.s"]
-    fn __lasx_xvftintrz_wu_s(a: v8f32) -> v8u32;
+    fn __lasx_xvftintrz_wu_s(a: __v8f32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.lu.d"]
-    fn __lasx_xvftintrz_lu_d(a: v4f64) -> v4u64;
+    fn __lasx_xvftintrz_lu_d(a: __v4f64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvffint.s.w"]
-    fn __lasx_xvffint_s_w(a: v8i32) -> v8f32;
+    fn __lasx_xvffint_s_w(a: __v8i32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvffint.d.l"]
-    fn __lasx_xvffint_d_l(a: v4i64) -> v4f64;
+    fn __lasx_xvffint_d_l(a: __v4i64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvffint.s.wu"]
-    fn __lasx_xvffint_s_wu(a: v8u32) -> v8f32;
+    fn __lasx_xvffint_s_wu(a: __v8u32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvffint.d.lu"]
-    fn __lasx_xvffint_d_lu(a: v4u64) -> v4f64;
+    fn __lasx_xvffint_d_lu(a: __v4u64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvreplve.b"]
-    fn __lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvreplve_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvreplve.h"]
-    fn __lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvreplve_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvreplve.w"]
-    fn __lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvreplve_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplve.d"]
-    fn __lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvreplve_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpermi.w"]
-    fn __lasx_xvpermi_w(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvpermi_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvandn.v"]
-    fn __lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvandn_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvneg.b"]
-    fn __lasx_xvneg_b(a: v32i8) -> v32i8;
+    fn __lasx_xvneg_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvneg.h"]
-    fn __lasx_xvneg_h(a: v16i16) -> v16i16;
+    fn __lasx_xvneg_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvneg.w"]
-    fn __lasx_xvneg_w(a: v8i32) -> v8i32;
+    fn __lasx_xvneg_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvneg.d"]
-    fn __lasx_xvneg_d(a: v4i64) -> v4i64;
+    fn __lasx_xvneg_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmuh.b"]
-    fn __lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmuh_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmuh.h"]
-    fn __lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmuh_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmuh.w"]
-    fn __lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmuh_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmuh.d"]
-    fn __lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmuh_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmuh.bu"]
-    fn __lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmuh_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmuh.hu"]
-    fn __lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmuh_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmuh.wu"]
-    fn __lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmuh_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmuh.du"]
-    fn __lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmuh_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.h.b"]
-    fn __lasx_xvsllwil_h_b(a: v32i8, b: u32) -> v16i16;
+    fn __lasx_xvsllwil_h_b(a: __v32i8, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.w.h"]
-    fn __lasx_xvsllwil_w_h(a: v16i16, b: u32) -> v8i32;
+    fn __lasx_xvsllwil_w_h(a: __v16i16, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.d.w"]
-    fn __lasx_xvsllwil_d_w(a: v8i32, b: u32) -> v4i64;
+    fn __lasx_xvsllwil_d_w(a: __v8i32, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.hu.bu"]
-    fn __lasx_xvsllwil_hu_bu(a: v32u8, b: u32) -> v16u16;
+    fn __lasx_xvsllwil_hu_bu(a: __v32u8, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.wu.hu"]
-    fn __lasx_xvsllwil_wu_hu(a: v16u16, b: u32) -> v8u32;
+    fn __lasx_xvsllwil_wu_hu(a: __v16u16, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.du.wu"]
-    fn __lasx_xvsllwil_du_wu(a: v8u32, b: u32) -> v4u64;
+    fn __lasx_xvsllwil_du_wu(a: __v8u32, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvsran.b.h"]
-    fn __lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsran_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsran.h.w"]
-    fn __lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsran_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsran.w.d"]
-    fn __lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsran_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssran.b.h"]
-    fn __lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssran_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssran.h.w"]
-    fn __lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssran_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssran.w.d"]
-    fn __lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssran_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssran.bu.h"]
-    fn __lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssran_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssran.hu.w"]
-    fn __lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssran_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssran.wu.d"]
-    fn __lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssran_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsrarn.b.h"]
-    fn __lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsrarn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrarn.h.w"]
-    fn __lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsrarn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrarn.w.d"]
-    fn __lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsrarn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.b.h"]
-    fn __lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssrarn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.h.w"]
-    fn __lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssrarn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.w.d"]
-    fn __lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssrarn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.bu.h"]
-    fn __lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssrarn_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.hu.w"]
-    fn __lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssrarn_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.wu.d"]
-    fn __lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssrarn_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsrln.b.h"]
-    fn __lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsrln_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrln.h.w"]
-    fn __lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsrln_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrln.w.d"]
-    fn __lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsrln_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrln.bu.h"]
-    fn __lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssrln_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrln.hu.w"]
-    fn __lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssrln_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrln.wu.d"]
-    fn __lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssrln_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsrlrn.b.h"]
-    fn __lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsrlrn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlrn.h.w"]
-    fn __lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsrlrn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlrn.w.d"]
-    fn __lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsrlrn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.bu.h"]
-    fn __lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssrlrn_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.hu.w"]
-    fn __lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssrlrn_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.wu.d"]
-    fn __lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssrlrn_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvfrstpi.b"]
-    fn __lasx_xvfrstpi_b(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvfrstpi_b(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvfrstpi.h"]
-    fn __lasx_xvfrstpi_h(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvfrstpi_h(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvfrstp.b"]
-    fn __lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvfrstp_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvfrstp.h"]
-    fn __lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvfrstp_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.d"]
-    fn __lasx_xvshuf4i_d(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvshuf4i_d(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvbsrl.v"]
-    fn __lasx_xvbsrl_v(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvbsrl_v(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvbsll.v"]
-    fn __lasx_xvbsll_v(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvbsll_v(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvextrins.b"]
-    fn __lasx_xvextrins_b(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvextrins_b(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvextrins.h"]
-    fn __lasx_xvextrins_h(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvextrins_h(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvextrins.w"]
-    fn __lasx_xvextrins_w(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvextrins_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvextrins.d"]
-    fn __lasx_xvextrins_d(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvextrins_d(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.b"]
-    fn __lasx_xvmskltz_b(a: v32i8) -> v32i8;
+    fn __lasx_xvmskltz_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.h"]
-    fn __lasx_xvmskltz_h(a: v16i16) -> v16i16;
+    fn __lasx_xvmskltz_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.w"]
-    fn __lasx_xvmskltz_w(a: v8i32) -> v8i32;
+    fn __lasx_xvmskltz_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.d"]
-    fn __lasx_xvmskltz_d(a: v4i64) -> v4i64;
+    fn __lasx_xvmskltz_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.b"]
-    fn __lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsigncov_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.h"]
-    fn __lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsigncov_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.w"]
-    fn __lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsigncov_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.d"]
-    fn __lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsigncov_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfmadd.s"]
-    fn __lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfmadd_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmadd.d"]
-    fn __lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfmadd_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmsub.s"]
-    fn __lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfmsub_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmsub.d"]
-    fn __lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfmsub_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfnmadd.s"]
-    fn __lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfnmadd_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfnmadd.d"]
-    fn __lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfnmadd_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfnmsub.s"]
-    fn __lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfnmsub_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfnmsub.d"]
-    fn __lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfnmsub_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvftintrne.w.s"]
-    fn __lasx_xvftintrne_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrne_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrne.l.d"]
-    fn __lasx_xvftintrne_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrne_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrp.w.s"]
-    fn __lasx_xvftintrp_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrp_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrp.l.d"]
-    fn __lasx_xvftintrp_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrp_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrm.w.s"]
-    fn __lasx_xvftintrm_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrm_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrm.l.d"]
-    fn __lasx_xvftintrm_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrm_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftint.w.d"]
-    fn __lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftint_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvffint.s.l"]
-    fn __lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32;
+    fn __lasx_xvffint_s_l(a: __v4i64, b: __v4i64) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.w.d"]
-    fn __lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrz_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrp.w.d"]
-    fn __lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrp_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrm.w.d"]
-    fn __lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrm_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrne.w.d"]
-    fn __lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrne_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftinth.l.s"]
-    fn __lasx_xvftinth_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftinth_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintl.l.s"]
-    fn __lasx_xvftintl_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintl_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvffinth.d.w"]
-    fn __lasx_xvffinth_d_w(a: v8i32) -> v4f64;
+    fn __lasx_xvffinth_d_w(a: __v8i32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvffintl.d.w"]
-    fn __lasx_xvffintl_d_w(a: v8i32) -> v4f64;
+    fn __lasx_xvffintl_d_w(a: __v8i32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvftintrzh.l.s"]
-    fn __lasx_xvftintrzh_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrzh_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrzl.l.s"]
-    fn __lasx_xvftintrzl_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrzl_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrph.l.s"]
-    fn __lasx_xvftintrph_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrph_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrpl.l.s"]
-    fn __lasx_xvftintrpl_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrpl_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrmh.l.s"]
-    fn __lasx_xvftintrmh_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrmh_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrml.l.s"]
-    fn __lasx_xvftintrml_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrml_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrneh.l.s"]
-    fn __lasx_xvftintrneh_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrneh_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrnel.l.s"]
-    fn __lasx_xvftintrnel_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrnel_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrne.s"]
-    fn __lasx_xvfrintrne_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrne_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrne.d"]
-    fn __lasx_xvfrintrne_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrne_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrz.s"]
-    fn __lasx_xvfrintrz_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrz_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrz.d"]
-    fn __lasx_xvfrintrz_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrz_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrp.s"]
-    fn __lasx_xvfrintrp_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrp_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrp.d"]
-    fn __lasx_xvfrintrp_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrp_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrm.s"]
-    fn __lasx_xvfrintrm_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrm_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrm.d"]
-    fn __lasx_xvfrintrm_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrm_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvld"]
-    fn __lasx_xvld(a: *const i8, b: i32) -> v32i8;
+    fn __lasx_xvld(a: *const i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvst"]
-    fn __lasx_xvst(a: v32i8, b: *mut i8, c: i32);
+    fn __lasx_xvst(a: __v32i8, b: *mut i8, c: i32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.b"]
-    fn __lasx_xvstelm_b(a: v32i8, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_b(a: __v32i8, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.h"]
-    fn __lasx_xvstelm_h(a: v16i16, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_h(a: __v16i16, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.w"]
-    fn __lasx_xvstelm_w(a: v8i32, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_w(a: __v8i32, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.d"]
-    fn __lasx_xvstelm_d(a: v4i64, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_d(a: __v4i64, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvinsve0.w"]
-    fn __lasx_xvinsve0_w(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvinsve0_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvinsve0.d"]
-    fn __lasx_xvinsve0_d(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvinsve0_d(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickve.w"]
-    fn __lasx_xvpickve_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvpickve_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickve.d"]
-    fn __lasx_xvpickve_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvpickve_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.b.h"]
-    fn __lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssrlrn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.h.w"]
-    fn __lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssrlrn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.w.d"]
-    fn __lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssrlrn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrln.b.h"]
-    fn __lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssrln_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrln.h.w"]
-    fn __lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssrln_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrln.w.d"]
-    fn __lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssrln_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvorn.v"]
-    fn __lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvorn_v(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvldi"]
-    fn __lasx_xvldi(a: i32) -> v4i64;
+    fn __lasx_xvldi(a: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvldx"]
-    fn __lasx_xvldx(a: *const i8, b: i64) -> v32i8;
+    fn __lasx_xvldx(a: *const i8, b: i64) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvstx"]
-    fn __lasx_xvstx(a: v32i8, b: *mut i8, c: i64);
+    fn __lasx_xvstx(a: __v32i8, b: *mut i8, c: i64);
     #[link_name = "llvm.loongarch.lasx.xvextl.qu.du"]
-    fn __lasx_xvextl_qu_du(a: v4u64) -> v4u64;
+    fn __lasx_xvextl_qu_du(a: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.w"]
-    fn __lasx_xvinsgr2vr_w(a: v8i32, b: i32, c: u32) -> v8i32;
+    fn __lasx_xvinsgr2vr_w(a: __v8i32, b: i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.d"]
-    fn __lasx_xvinsgr2vr_d(a: v4i64, b: i64, c: u32) -> v4i64;
+    fn __lasx_xvinsgr2vr_d(a: __v4i64, b: i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.b"]
-    fn __lasx_xvreplve0_b(a: v32i8) -> v32i8;
+    fn __lasx_xvreplve0_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.h"]
-    fn __lasx_xvreplve0_h(a: v16i16) -> v16i16;
+    fn __lasx_xvreplve0_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.w"]
-    fn __lasx_xvreplve0_w(a: v8i32) -> v8i32;
+    fn __lasx_xvreplve0_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.d"]
-    fn __lasx_xvreplve0_d(a: v4i64) -> v4i64;
+    fn __lasx_xvreplve0_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.q"]
-    fn __lasx_xvreplve0_q(a: v32i8) -> v32i8;
+    fn __lasx_xvreplve0_q(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.vext2xv.h.b"]
-    fn __lasx_vext2xv_h_b(a: v32i8) -> v16i16;
+    fn __lasx_vext2xv_h_b(a: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.vext2xv.w.h"]
-    fn __lasx_vext2xv_w_h(a: v16i16) -> v8i32;
+    fn __lasx_vext2xv_w_h(a: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.d.w"]
-    fn __lasx_vext2xv_d_w(a: v8i32) -> v4i64;
+    fn __lasx_vext2xv_d_w(a: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.w.b"]
-    fn __lasx_vext2xv_w_b(a: v32i8) -> v8i32;
+    fn __lasx_vext2xv_w_b(a: __v32i8) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.d.h"]
-    fn __lasx_vext2xv_d_h(a: v16i16) -> v4i64;
+    fn __lasx_vext2xv_d_h(a: __v16i16) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.d.b"]
-    fn __lasx_vext2xv_d_b(a: v32i8) -> v4i64;
+    fn __lasx_vext2xv_d_b(a: __v32i8) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.hu.bu"]
-    fn __lasx_vext2xv_hu_bu(a: v32i8) -> v16i16;
+    fn __lasx_vext2xv_hu_bu(a: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.vext2xv.wu.hu"]
-    fn __lasx_vext2xv_wu_hu(a: v16i16) -> v8i32;
+    fn __lasx_vext2xv_wu_hu(a: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.du.wu"]
-    fn __lasx_vext2xv_du_wu(a: v8i32) -> v4i64;
+    fn __lasx_vext2xv_du_wu(a: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.wu.bu"]
-    fn __lasx_vext2xv_wu_bu(a: v32i8) -> v8i32;
+    fn __lasx_vext2xv_wu_bu(a: __v32i8) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.du.hu"]
-    fn __lasx_vext2xv_du_hu(a: v16i16) -> v4i64;
+    fn __lasx_vext2xv_du_hu(a: __v16i16) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.du.bu"]
-    fn __lasx_vext2xv_du_bu(a: v32i8) -> v4i64;
+    fn __lasx_vext2xv_du_bu(a: __v32i8) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpermi.q"]
-    fn __lasx_xvpermi_q(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvpermi_q(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpermi.d"]
-    fn __lasx_xvpermi_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvpermi_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvperm.w"]
-    fn __lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvperm_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.b"]
-    fn __lasx_xvldrepl_b(a: *const i8, b: i32) -> v32i8;
+    fn __lasx_xvldrepl_b(a: *const i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.h"]
-    fn __lasx_xvldrepl_h(a: *const i8, b: i32) -> v16i16;
+    fn __lasx_xvldrepl_h(a: *const i8, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.w"]
-    fn __lasx_xvldrepl_w(a: *const i8, b: i32) -> v8i32;
+    fn __lasx_xvldrepl_w(a: *const i8, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.d"]
-    fn __lasx_xvldrepl_d(a: *const i8, b: i32) -> v4i64;
+    fn __lasx_xvldrepl_d(a: *const i8, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.w"]
-    fn __lasx_xvpickve2gr_w(a: v8i32, b: u32) -> i32;
+    fn __lasx_xvpickve2gr_w(a: __v8i32, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.wu"]
-    fn __lasx_xvpickve2gr_wu(a: v8i32, b: u32) -> u32;
+    fn __lasx_xvpickve2gr_wu(a: __v8i32, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.d"]
-    fn __lasx_xvpickve2gr_d(a: v4i64, b: u32) -> i64;
+    fn __lasx_xvpickve2gr_d(a: __v4i64, b: u32) -> i64;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.du"]
-    fn __lasx_xvpickve2gr_du(a: v4i64, b: u32) -> u64;
+    fn __lasx_xvpickve2gr_du(a: __v4i64, b: u32) -> u64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.q.d"]
-    fn __lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.d.w"]
-    fn __lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwev_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.w.h"]
-    fn __lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwev_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.h.b"]
-    fn __lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwev_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.q.du"]
-    fn __lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvaddwev_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.d.wu"]
-    fn __lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvaddwev_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.w.hu"]
-    fn __lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvaddwev_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.h.bu"]
-    fn __lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvaddwev_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.q.d"]
-    fn __lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsubwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.d.w"]
-    fn __lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvsubwev_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.w.h"]
-    fn __lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvsubwev_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.h.b"]
-    fn __lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvsubwev_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.q.du"]
-    fn __lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvsubwev_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.d.wu"]
-    fn __lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvsubwev_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.w.hu"]
-    fn __lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvsubwev_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.h.bu"]
-    fn __lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvsubwev_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.q.d"]
-    fn __lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.d.w"]
-    fn __lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwev_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.w.h"]
-    fn __lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwev_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.h.b"]
-    fn __lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwev_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.q.du"]
-    fn __lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvmulwev_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.d.wu"]
-    fn __lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvmulwev_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.w.hu"]
-    fn __lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvmulwev_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.h.bu"]
-    fn __lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvmulwev_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.q.d"]
-    fn __lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwod_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.d.w"]
-    fn __lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwod_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.w.h"]
-    fn __lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwod_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.h.b"]
-    fn __lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwod_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.q.du"]
-    fn __lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvaddwod_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.d.wu"]
-    fn __lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvaddwod_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.w.hu"]
-    fn __lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvaddwod_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.h.bu"]
-    fn __lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvaddwod_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.q.d"]
-    fn __lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsubwod_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.d.w"]
-    fn __lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvsubwod_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.w.h"]
-    fn __lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvsubwod_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.h.b"]
-    fn __lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvsubwod_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.q.du"]
-    fn __lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvsubwod_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.d.wu"]
-    fn __lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvsubwod_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.w.hu"]
-    fn __lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvsubwod_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.h.bu"]
-    fn __lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvsubwod_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.q.d"]
-    fn __lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwod_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.d.w"]
-    fn __lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwod_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.w.h"]
-    fn __lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwod_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.h.b"]
-    fn __lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwod_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.q.du"]
-    fn __lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvmulwod_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.d.wu"]
-    fn __lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvmulwod_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.w.hu"]
-    fn __lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvmulwod_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.h.bu"]
-    fn __lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvmulwod_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.d.wu.w"]
-    fn __lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwev_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.w.hu.h"]
-    fn __lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwev_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.h.bu.b"]
-    fn __lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwev_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.d.wu.w"]
-    fn __lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwev_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.w.hu.h"]
-    fn __lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwev_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.h.bu.b"]
-    fn __lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwev_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.d.wu.w"]
-    fn __lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwod_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.w.hu.h"]
-    fn __lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwod_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.h.bu.b"]
-    fn __lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwod_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.d.wu.w"]
-    fn __lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwod_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.w.hu.h"]
-    fn __lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwod_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.h.bu.b"]
-    fn __lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwod_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.q.d"]
-    fn __lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvhaddw_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.qu.du"]
-    fn __lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvhaddw_qu_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.q.d"]
-    fn __lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvhsubw_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.qu.du"]
-    fn __lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvhsubw_qu_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.d"]
-    fn __lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwev_q_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.w"]
-    fn __lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwev_d_w(a: __v4i64, b: __v8i32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.h"]
-    fn __lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwev_w_h(a: __v8i32, b: __v16i16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.b"]
-    fn __lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwev_h_b(a: __v16i16, b: __v32i8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.du"]
-    fn __lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64;
+    fn __lasx_xvmaddwev_q_du(a: __v4u64, b: __v4u64, c: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.wu"]
-    fn __lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64;
+    fn __lasx_xvmaddwev_d_wu(a: __v4u64, b: __v8u32, c: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.hu"]
-    fn __lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32;
+    fn __lasx_xvmaddwev_w_hu(a: __v8u32, b: __v16u16, c: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.bu"]
-    fn __lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16;
+    fn __lasx_xvmaddwev_h_bu(a: __v16u16, b: __v32u8, c: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.d"]
-    fn __lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwod_q_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.w"]
-    fn __lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwod_d_w(a: __v4i64, b: __v8i32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.h"]
-    fn __lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwod_w_h(a: __v8i32, b: __v16i16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.b"]
-    fn __lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwod_h_b(a: __v16i16, b: __v32i8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.du"]
-    fn __lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64;
+    fn __lasx_xvmaddwod_q_du(a: __v4u64, b: __v4u64, c: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.wu"]
-    fn __lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64;
+    fn __lasx_xvmaddwod_d_wu(a: __v4u64, b: __v8u32, c: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.hu"]
-    fn __lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32;
+    fn __lasx_xvmaddwod_w_hu(a: __v8u32, b: __v16u16, c: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.bu"]
-    fn __lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16;
+    fn __lasx_xvmaddwod_h_bu(a: __v16u16, b: __v32u8, c: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.du.d"]
-    fn __lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwev_q_du_d(a: __v4i64, b: __v4u64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.wu.w"]
-    fn __lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwev_d_wu_w(a: __v4i64, b: __v8u32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.hu.h"]
-    fn __lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwev_w_hu_h(a: __v8i32, b: __v16u16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.bu.b"]
-    fn __lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwev_h_bu_b(a: __v16i16, b: __v32u8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.du.d"]
-    fn __lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwod_q_du_d(a: __v4i64, b: __v4u64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.wu.w"]
-    fn __lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwod_d_wu_w(a: __v4i64, b: __v8u32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.hu.h"]
-    fn __lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwod_w_hu_h(a: __v8i32, b: __v16u16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.bu.b"]
-    fn __lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwod_h_bu_b(a: __v16i16, b: __v32u8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrotr.b"]
-    fn __lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvrotr_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrotr.h"]
-    fn __lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvrotr_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrotr.w"]
-    fn __lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvrotr_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvrotr.d"]
-    fn __lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvrotr_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvadd.q"]
-    fn __lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvadd_q(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsub.q"]
-    fn __lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsub_q(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.q.du.d"]
-    fn __lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwev_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.q.du.d"]
-    fn __lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwod_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.q.du.d"]
-    fn __lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwev_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.q.du.d"]
-    fn __lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwod_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmskgez.b"]
-    fn __lasx_xvmskgez_b(a: v32i8) -> v32i8;
+    fn __lasx_xvmskgez_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmsknz.b"]
-    fn __lasx_xvmsknz_b(a: v32i8) -> v32i8;
+    fn __lasx_xvmsknz_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvexth.h.b"]
-    fn __lasx_xvexth_h_b(a: v32i8) -> v16i16;
+    fn __lasx_xvexth_h_b(a: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvexth.w.h"]
-    fn __lasx_xvexth_w_h(a: v16i16) -> v8i32;
+    fn __lasx_xvexth_w_h(a: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvexth.d.w"]
-    fn __lasx_xvexth_d_w(a: v8i32) -> v4i64;
+    fn __lasx_xvexth_d_w(a: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvexth.q.d"]
-    fn __lasx_xvexth_q_d(a: v4i64) -> v4i64;
+    fn __lasx_xvexth_q_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvexth.hu.bu"]
-    fn __lasx_xvexth_hu_bu(a: v32u8) -> v16u16;
+    fn __lasx_xvexth_hu_bu(a: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvexth.wu.hu"]
-    fn __lasx_xvexth_wu_hu(a: v16u16) -> v8u32;
+    fn __lasx_xvexth_wu_hu(a: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvexth.du.wu"]
-    fn __lasx_xvexth_du_wu(a: v8u32) -> v4u64;
+    fn __lasx_xvexth_du_wu(a: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvexth.qu.du"]
-    fn __lasx_xvexth_qu_du(a: v4u64) -> v4u64;
+    fn __lasx_xvexth_qu_du(a: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvrotri.b"]
-    fn __lasx_xvrotri_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvrotri_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrotri.h"]
-    fn __lasx_xvrotri_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvrotri_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrotri.w"]
-    fn __lasx_xvrotri_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvrotri_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvrotri.d"]
-    fn __lasx_xvrotri_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvrotri_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvextl.q.d"]
-    fn __lasx_xvextl_q_d(a: v4i64) -> v4i64;
+    fn __lasx_xvextl_q_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.b.h"]
-    fn __lasx_xvsrlni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrlni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.h.w"]
-    fn __lasx_xvsrlni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrlni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.w.d"]
-    fn __lasx_xvsrlni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrlni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.d.q"]
-    fn __lasx_xvsrlni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrlni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.b.h"]
-    fn __lasx_xvsrlrni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrlrni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.h.w"]
-    fn __lasx_xvsrlrni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrlrni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.w.d"]
-    fn __lasx_xvsrlrni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrlrni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.d.q"]
-    fn __lasx_xvsrlrni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrlrni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.b.h"]
-    fn __lasx_xvssrlni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrlni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.h.w"]
-    fn __lasx_xvssrlni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrlni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.w.d"]
-    fn __lasx_xvssrlni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrlni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.d.q"]
-    fn __lasx_xvssrlni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrlni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.bu.h"]
-    fn __lasx_xvssrlni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrlni_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.hu.w"]
-    fn __lasx_xvssrlni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrlni_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.wu.d"]
-    fn __lasx_xvssrlni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrlni_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.du.q"]
-    fn __lasx_xvssrlni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrlni_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.b.h"]
-    fn __lasx_xvssrlrni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrlrni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.h.w"]
-    fn __lasx_xvssrlrni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrlrni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.w.d"]
-    fn __lasx_xvssrlrni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrlrni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.d.q"]
-    fn __lasx_xvssrlrni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrlrni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.bu.h"]
-    fn __lasx_xvssrlrni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrlrni_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.hu.w"]
-    fn __lasx_xvssrlrni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrlrni_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.wu.d"]
-    fn __lasx_xvssrlrni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrlrni_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.du.q"]
-    fn __lasx_xvssrlrni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrlrni_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvsrani.b.h"]
-    fn __lasx_xvsrani_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrani_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrani.h.w"]
-    fn __lasx_xvsrani_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrani_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrani.w.d"]
-    fn __lasx_xvsrani_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrani_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrani.d.q"]
-    fn __lasx_xvsrani_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrani_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.b.h"]
-    fn __lasx_xvsrarni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrarni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.h.w"]
-    fn __lasx_xvsrarni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrarni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.w.d"]
-    fn __lasx_xvsrarni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrarni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.d.q"]
-    fn __lasx_xvsrarni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrarni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrani.b.h"]
-    fn __lasx_xvssrani_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrani_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrani.h.w"]
-    fn __lasx_xvssrani_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrani_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrani.w.d"]
-    fn __lasx_xvssrani_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrani_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrani.d.q"]
-    fn __lasx_xvssrani_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrani_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrani.bu.h"]
-    fn __lasx_xvssrani_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrani_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrani.hu.w"]
-    fn __lasx_xvssrani_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrani_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrani.wu.d"]
-    fn __lasx_xvssrani_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrani_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrani.du.q"]
-    fn __lasx_xvssrani_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrani_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.b.h"]
-    fn __lasx_xvssrarni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrarni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.h.w"]
-    fn __lasx_xvssrarni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrarni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.w.d"]
-    fn __lasx_xvssrarni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrarni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.d.q"]
-    fn __lasx_xvssrarni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrarni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.bu.h"]
-    fn __lasx_xvssrarni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrarni_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.hu.w"]
-    fn __lasx_xvssrarni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrarni_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.wu.d"]
-    fn __lasx_xvssrarni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrarni_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.du.q"]
-    fn __lasx_xvssrarni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrarni_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xbnz.b"]
-    fn __lasx_xbnz_b(a: v32u8) -> i32;
+    fn __lasx_xbnz_b(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.d"]
-    fn __lasx_xbnz_d(a: v4u64) -> i32;
+    fn __lasx_xbnz_d(a: __v4u64) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.h"]
-    fn __lasx_xbnz_h(a: v16u16) -> i32;
+    fn __lasx_xbnz_h(a: __v16u16) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.v"]
-    fn __lasx_xbnz_v(a: v32u8) -> i32;
+    fn __lasx_xbnz_v(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.w"]
-    fn __lasx_xbnz_w(a: v8u32) -> i32;
+    fn __lasx_xbnz_w(a: __v8u32) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.b"]
-    fn __lasx_xbz_b(a: v32u8) -> i32;
+    fn __lasx_xbz_b(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.d"]
-    fn __lasx_xbz_d(a: v4u64) -> i32;
+    fn __lasx_xbz_d(a: __v4u64) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.h"]
-    fn __lasx_xbz_h(a: v16u16) -> i32;
+    fn __lasx_xbz_h(a: __v16u16) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.v"]
-    fn __lasx_xbz_v(a: v32u8) -> i32;
+    fn __lasx_xbz_v(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.w"]
-    fn __lasx_xbz_w(a: v8u32) -> i32;
+    fn __lasx_xbz_w(a: __v8u32) -> i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.caf.d"]
-    fn __lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_caf_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.caf.s"]
-    fn __lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_caf_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.ceq.d"]
-    fn __lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_ceq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.ceq.s"]
-    fn __lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_ceq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cle.d"]
-    fn __lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cle_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cle.s"]
-    fn __lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cle_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.clt.d"]
-    fn __lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_clt_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.clt.s"]
-    fn __lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_clt_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cne.d"]
-    fn __lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cne_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cne.s"]
-    fn __lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cne_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cor.d"]
-    fn __lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cor_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cor.s"]
-    fn __lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cor_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cueq.d"]
-    fn __lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cueq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cueq.s"]
-    fn __lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cueq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cule.d"]
-    fn __lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cule_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cule.s"]
-    fn __lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cule_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cult.d"]
-    fn __lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cult_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cult.s"]
-    fn __lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cult_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cun.d"]
-    fn __lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cun_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cune.d"]
-    fn __lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cune_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cune.s"]
-    fn __lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cune_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cun.s"]
-    fn __lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cun_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.saf.d"]
-    fn __lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_saf_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.saf.s"]
-    fn __lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_saf_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.seq.d"]
-    fn __lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_seq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.seq.s"]
-    fn __lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_seq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sle.d"]
-    fn __lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sle_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sle.s"]
-    fn __lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sle_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.slt.d"]
-    fn __lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_slt_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.slt.s"]
-    fn __lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_slt_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sne.d"]
-    fn __lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sne_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sne.s"]
-    fn __lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sne_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sor.d"]
-    fn __lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sor_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sor.s"]
-    fn __lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sor_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sueq.d"]
-    fn __lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sueq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sueq.s"]
-    fn __lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sueq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sule.d"]
-    fn __lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sule_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sule.s"]
-    fn __lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sule_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sult.d"]
-    fn __lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sult_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sult.s"]
-    fn __lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sult_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sun.d"]
-    fn __lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sun_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sune.d"]
-    fn __lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sune_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sune.s"]
-    fn __lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sune_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sun.s"]
-    fn __lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sun_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickve.d.f"]
-    fn __lasx_xvpickve_d_f(a: v4f64, b: u32) -> v4f64;
+    fn __lasx_xvpickve_d_f(a: __v4f64, b: u32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvpickve.w.f"]
-    fn __lasx_xvpickve_w_f(a: v8f32, b: u32) -> v8f32;
+    fn __lasx_xvpickve_w_f(a: __v8f32, b: u32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvrepli.b"]
-    fn __lasx_xvrepli_b(a: i32) -> v32i8;
+    fn __lasx_xvrepli_b(a: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrepli.d"]
-    fn __lasx_xvrepli_d(a: i32) -> v4i64;
+    fn __lasx_xvrepli_d(a: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvrepli.h"]
-    fn __lasx_xvrepli_h(a: i32) -> v16i16;
+    fn __lasx_xvrepli_h(a: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrepli.w"]
-    fn __lasx_xvrepli_w(a: i32) -> v8i32;
+    fn __lasx_xvrepli_w(a: i32) -> __v8i32;
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsll_b(a, b) }
+pub fn lasx_xvsll_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_b(transmute(a), transmute(b))) }
 }
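
// Illustrative sketch, not part of this patch: since every integer wrapper now
// takes and returns the unified `m256i` type, operations on differently sized
// lanes compose without manual casts; the element width is carried only by the
// intrinsic name. The function below is hypothetical and uses only wrappers
// defined in this file, following the same `#[target_feature]` convention.
//
// #[target_feature(enable = "lasx")]
// fn shift_then_add(a: m256i, b: m256i, shift: m256i) -> m256i {
//     let x = lasx_xvsll_b(a, shift);   // per-lane shift left on i8 lanes
//     let y = lasx_xvslli_h::<2>(b);    // immediate shift on i16 lanes, same m256i type
//     lasx_xvadd_b(x, y)                // lane-wise i8 add; every value stays m256i
// }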
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsll_h(a, b) }
+pub fn lasx_xvsll_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsll_w(a, b) }
+pub fn lasx_xvsll_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsll_d(a, b) }
+pub fn lasx_xvsll_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvslli_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvslli_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvslli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvslli_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvslli_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvslli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvslli_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslli_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvslli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvslli_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvslli_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvslli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsra_b(a, b) }
+pub fn lasx_xvsra_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsra_h(a, b) }
+pub fn lasx_xvsra_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsra_w(a, b) }
+pub fn lasx_xvsra_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsra_d(a, b) }
+pub fn lasx_xvsra_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrai_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrai_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrai_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrai_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrai_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrai_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrai_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrai_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrai_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrai_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrai_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrai_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsrar_b(a, b) }
+pub fn lasx_xvsrar_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsrar_h(a, b) }
+pub fn lasx_xvsrar_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsrar_w(a, b) }
+pub fn lasx_xvsrar_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsrar_d(a, b) }
+pub fn lasx_xvsrar_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrari_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrari_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrari_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrari_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrari_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrari_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrari_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrari_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrari_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrari_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrari_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrari_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsrl_b(a, b) }
+pub fn lasx_xvsrl_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsrl_h(a, b) }
+pub fn lasx_xvsrl_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsrl_w(a, b) }
+pub fn lasx_xvsrl_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsrl_d(a, b) }
+pub fn lasx_xvsrl_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrli_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrli_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrli_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrli_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrli_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrli_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrli_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrli_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsrlr_b(a, b) }
+pub fn lasx_xvsrlr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsrlr_h(a, b) }
+pub fn lasx_xvsrlr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsrlr_w(a, b) }
+pub fn lasx_xvsrlr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsrlr_d(a, b) }
+pub fn lasx_xvsrlr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrlri_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrlri_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrlri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrlri_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrlri_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrlri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrlri_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrlri_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrlri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrlri_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrlri_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrlri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitclr_b(a, b) }
+pub fn lasx_xvbitclr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvbitclr_h(a, b) }
+pub fn lasx_xvbitclr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvbitclr_w(a, b) }
+pub fn lasx_xvbitclr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvbitclr_d(a, b) }
+pub fn lasx_xvbitclr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_b<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvbitclri_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvbitclri_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvbitclri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_h<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvbitclri_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvbitclri_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvbitclri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_w<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvbitclri_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbitclri_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvbitclri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_d<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvbitclri_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvbitclri_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvbitclri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitset_b(a, b) }
+pub fn lasx_xvbitset_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvbitset_h(a, b) }
+pub fn lasx_xvbitset_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvbitset_w(a, b) }
+pub fn lasx_xvbitset_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvbitset_d(a, b) }
+pub fn lasx_xvbitset_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_b<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvbitseti_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvbitseti_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvbitseti_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_h<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvbitseti_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvbitseti_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvbitseti_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_w<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvbitseti_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbitseti_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvbitseti_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_d<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvbitseti_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvbitseti_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvbitseti_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitrev_b(a, b) }
+pub fn lasx_xvbitrev_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvbitrev_h(a, b) }
+pub fn lasx_xvbitrev_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvbitrev_w(a, b) }
+pub fn lasx_xvbitrev_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvbitrev_d(a, b) }
+pub fn lasx_xvbitrev_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_b<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvbitrevi_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvbitrevi_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvbitrevi_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_h<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvbitrevi_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvbitrevi_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvbitrevi_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_w<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvbitrevi_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbitrevi_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvbitrevi_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_d<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvbitrevi_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvbitrevi_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvbitrevi_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvadd_b(a, b) }
+pub fn lasx_xvadd_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvadd_h(a, b) }
+pub fn lasx_xvadd_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvadd_w(a, b) }
+pub fn lasx_xvadd_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvadd_d(a, b) }
+pub fn lasx_xvadd_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_bu<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvaddi_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_hu<const IMM5: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvaddi_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_wu<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvaddi_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_du<const IMM5: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvaddi_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsub_b(a, b) }
+pub fn lasx_xvsub_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsub_h(a, b) }
+pub fn lasx_xvsub_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsub_w(a, b) }
+pub fn lasx_xvsub_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsub_d(a, b) }
+pub fn lasx_xvsub_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_bu<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsubi_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_hu<const IMM5: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsubi_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_wu<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsubi_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_du<const IMM5: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsubi_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmax_b(a, b) }
+pub fn lasx_xvmax_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmax_h(a, b) }
+pub fn lasx_xvmax_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmax_w(a, b) }
+pub fn lasx_xvmax_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmax_d(a, b) }
+pub fn lasx_xvmax_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvmaxi_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvmaxi_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvmaxi_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvmaxi_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmax_bu(a, b) }
+pub fn lasx_xvmax_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmax_hu(a, b) }
+pub fn lasx_xvmax_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmax_wu(a, b) }
+pub fn lasx_xvmax_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmax_du(a, b) }
+pub fn lasx_xvmax_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_bu<const IMM5: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvmaxi_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_hu<const IMM5: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvmaxi_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_wu<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvmaxi_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_du<const IMM5: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvmaxi_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmin_b(a, b) }
+pub fn lasx_xvmin_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmin_h(a, b) }
+pub fn lasx_xvmin_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmin_w(a, b) }
+pub fn lasx_xvmin_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmin_d(a, b) }
+pub fn lasx_xvmin_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvmini_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvmini_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvmini_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvmini_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmin_bu(a, b) }
+pub fn lasx_xvmin_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmin_hu(a, b) }
+pub fn lasx_xvmin_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmin_wu(a, b) }
+pub fn lasx_xvmin_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmin_du(a, b) }
+pub fn lasx_xvmin_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_bu<const IMM5: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvmini_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_hu<const IMM5: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvmini_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_wu<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvmini_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_du<const IMM5: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvmini_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvseq_b(a, b) }
+pub fn lasx_xvseq_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvseq_h(a, b) }
+pub fn lasx_xvseq_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvseq_w(a, b) }
+pub fn lasx_xvseq_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvseq_d(a, b) }
+pub fn lasx_xvseq_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvseqi_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvseqi_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvseqi_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvseqi_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvslt_b(a, b) }
+pub fn lasx_xvslt_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvslt_h(a, b) }
+pub fn lasx_xvslt_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvslt_w(a, b) }
+pub fn lasx_xvslt_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvslt_d(a, b) }
+pub fn lasx_xvslt_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvslti_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvslti_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvslti_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvslti_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8 {
-    unsafe { __lasx_xvslt_bu(a, b) }
+pub fn lasx_xvslt_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16 {
-    unsafe { __lasx_xvslt_hu(a, b) }
+pub fn lasx_xvslt_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32 {
-    unsafe { __lasx_xvslt_wu(a, b) }
+pub fn lasx_xvslt_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvslt_du(a, b) }
+pub fn lasx_xvslt_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_bu<const IMM5: u32>(a: v32u8) -> v32i8 {
+pub fn lasx_xvslti_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_hu<const IMM5: u32>(a: v16u16) -> v16i16 {
+pub fn lasx_xvslti_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_wu<const IMM5: u32>(a: v8u32) -> v8i32 {
+pub fn lasx_xvslti_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_du<const IMM5: u32>(a: v4u64) -> v4i64 {
+pub fn lasx_xvslti_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsle_b(a, b) }
+pub fn lasx_xvsle_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsle_h(a, b) }
+pub fn lasx_xvsle_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsle_w(a, b) }
+pub fn lasx_xvsle_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsle_d(a, b) }
+pub fn lasx_xvsle_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvslei_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvslei_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvslei_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvslei_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8 {
-    unsafe { __lasx_xvsle_bu(a, b) }
+pub fn lasx_xvsle_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16 {
-    unsafe { __lasx_xvsle_hu(a, b) }
+pub fn lasx_xvsle_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32 {
-    unsafe { __lasx_xvsle_wu(a, b) }
+pub fn lasx_xvsle_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvsle_du(a, b) }
+pub fn lasx_xvsle_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_bu<const IMM5: u32>(a: v32u8) -> v32i8 {
+pub fn lasx_xvslei_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_hu<const IMM5: u32>(a: v16u16) -> v16i16 {
+pub fn lasx_xvslei_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_wu<const IMM5: u32>(a: v8u32) -> v8i32 {
+pub fn lasx_xvslei_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_du<const IMM5: u32>(a: v4u64) -> v4i64 {
+pub fn lasx_xvslei_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsat_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsat_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsat_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsat_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsat_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsat_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsat_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsat_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsat_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsat_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsat_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsat_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_bu<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvsat_bu<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsat_bu(a, IMM3) }
+    unsafe { transmute(__lasx_xvsat_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_hu<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvsat_hu<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsat_hu(a, IMM4) }
+    unsafe { transmute(__lasx_xvsat_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_wu<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvsat_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsat_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsat_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_du<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvsat_du<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsat_du(a, IMM6) }
+    unsafe { transmute(__lasx_xvsat_du(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvadda_b(a, b) }
+pub fn lasx_xvadda_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvadda_h(a, b) }
+pub fn lasx_xvadda_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvadda_w(a, b) }
+pub fn lasx_xvadda_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvadda_d(a, b) }
+pub fn lasx_xvadda_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsadd_b(a, b) }
+pub fn lasx_xvsadd_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsadd_h(a, b) }
+pub fn lasx_xvsadd_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsadd_w(a, b) }
+pub fn lasx_xvsadd_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsadd_d(a, b) }
+pub fn lasx_xvsadd_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvsadd_bu(a, b) }
+pub fn lasx_xvsadd_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvsadd_hu(a, b) }
+pub fn lasx_xvsadd_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvsadd_wu(a, b) }
+pub fn lasx_xvsadd_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvsadd_du(a, b) }
+pub fn lasx_xvsadd_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvavg_b(a, b) }
+pub fn lasx_xvavg_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvavg_h(a, b) }
+pub fn lasx_xvavg_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvavg_w(a, b) }
+pub fn lasx_xvavg_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvavg_d(a, b) }
+pub fn lasx_xvavg_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvavg_bu(a, b) }
+pub fn lasx_xvavg_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvavg_hu(a, b) }
+pub fn lasx_xvavg_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvavg_wu(a, b) }
+pub fn lasx_xvavg_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvavg_du(a, b) }
+pub fn lasx_xvavg_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvavgr_b(a, b) }
+pub fn lasx_xvavgr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvavgr_h(a, b) }
+pub fn lasx_xvavgr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvavgr_w(a, b) }
+pub fn lasx_xvavgr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvavgr_d(a, b) }
+pub fn lasx_xvavgr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvavgr_bu(a, b) }
+pub fn lasx_xvavgr_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvavgr_hu(a, b) }
+pub fn lasx_xvavgr_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvavgr_wu(a, b) }
+pub fn lasx_xvavgr_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvavgr_du(a, b) }
+pub fn lasx_xvavgr_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvssub_b(a, b) }
+pub fn lasx_xvssub_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvssub_h(a, b) }
+pub fn lasx_xvssub_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvssub_w(a, b) }
+pub fn lasx_xvssub_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvssub_d(a, b) }
+pub fn lasx_xvssub_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvssub_bu(a, b) }
+pub fn lasx_xvssub_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvssub_hu(a, b) }
+pub fn lasx_xvssub_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvssub_wu(a, b) }
+pub fn lasx_xvssub_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvssub_du(a, b) }
+pub fn lasx_xvssub_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvabsd_b(a, b) }
+pub fn lasx_xvabsd_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvabsd_h(a, b) }
+pub fn lasx_xvabsd_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvabsd_w(a, b) }
+pub fn lasx_xvabsd_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvabsd_d(a, b) }
+pub fn lasx_xvabsd_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvabsd_bu(a, b) }
+pub fn lasx_xvabsd_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvabsd_hu(a, b) }
+pub fn lasx_xvabsd_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvabsd_wu(a, b) }
+pub fn lasx_xvabsd_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvabsd_du(a, b) }
+pub fn lasx_xvabsd_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmul_b(a, b) }
+pub fn lasx_xvmul_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmul_h(a, b) }
+pub fn lasx_xvmul_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmul_w(a, b) }
+pub fn lasx_xvmul_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmul_d(a, b) }
+pub fn lasx_xvmul_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmadd_b(a, b, c) }
+pub fn lasx_xvmadd_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmadd_h(a, b, c) }
+pub fn lasx_xvmadd_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmadd_w(a, b, c) }
+pub fn lasx_xvmadd_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmadd_d(a, b, c) }
+pub fn lasx_xvmadd_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmsub_b(a, b, c) }
+pub fn lasx_xvmsub_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmsub_h(a, b, c) }
+pub fn lasx_xvmsub_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmsub_w(a, b, c) }
+pub fn lasx_xvmsub_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmsub_d(a, b, c) }
+pub fn lasx_xvmsub_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvdiv_b(a, b) }
+pub fn lasx_xvdiv_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvdiv_h(a, b) }
+pub fn lasx_xvdiv_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvdiv_w(a, b) }
+pub fn lasx_xvdiv_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvdiv_d(a, b) }
+pub fn lasx_xvdiv_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvdiv_bu(a, b) }
+pub fn lasx_xvdiv_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvdiv_hu(a, b) }
+pub fn lasx_xvdiv_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvdiv_wu(a, b) }
+pub fn lasx_xvdiv_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvdiv_du(a, b) }
+pub fn lasx_xvdiv_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvhaddw_h_b(a, b) }
+pub fn lasx_xvhaddw_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvhaddw_w_h(a, b) }
+pub fn lasx_xvhaddw_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvhaddw_d_w(a, b) }
+pub fn lasx_xvhaddw_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16 {
-    unsafe { __lasx_xvhaddw_hu_bu(a, b) }
+pub fn lasx_xvhaddw_hu_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32 {
-    unsafe { __lasx_xvhaddw_wu_hu(a, b) }
+pub fn lasx_xvhaddw_wu_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64 {
-    unsafe { __lasx_xvhaddw_du_wu(a, b) }
+pub fn lasx_xvhaddw_du_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvhsubw_h_b(a, b) }
+pub fn lasx_xvhsubw_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvhsubw_w_h(a, b) }
+pub fn lasx_xvhsubw_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvhsubw_d_w(a, b) }
+pub fn lasx_xvhsubw_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvhsubw_hu_bu(a, b) }
+pub fn lasx_xvhsubw_hu_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvhsubw_wu_hu(a, b) }
+pub fn lasx_xvhsubw_wu_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvhsubw_du_wu(a, b) }
+pub fn lasx_xvhsubw_du_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmod_b(a, b) }
+pub fn lasx_xvmod_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmod_h(a, b) }
+pub fn lasx_xvmod_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmod_w(a, b) }
+pub fn lasx_xvmod_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmod_d(a, b) }
+pub fn lasx_xvmod_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmod_bu(a, b) }
+pub fn lasx_xvmod_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmod_hu(a, b) }
+pub fn lasx_xvmod_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmod_wu(a, b) }
+pub fn lasx_xvmod_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmod_du(a, b) }
+pub fn lasx_xvmod_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_b<const IMM4: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvrepl128vei_b<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvrepl128vei_b(a, IMM4) }
+    unsafe { transmute(__lasx_xvrepl128vei_b(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_h<const IMM3: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvrepl128vei_h<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvrepl128vei_h(a, IMM3) }
+    unsafe { transmute(__lasx_xvrepl128vei_h(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_w<const IMM2: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvrepl128vei_w<const IMM2: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvrepl128vei_w(a, IMM2) }
+    unsafe { transmute(__lasx_xvrepl128vei_w(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_d<const IMM1: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvrepl128vei_d<const IMM1: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lasx_xvrepl128vei_d(a, IMM1) }
+    unsafe { transmute(__lasx_xvrepl128vei_d(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpickev_b(a, b) }
+pub fn lasx_xvpickev_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpickev_h(a, b) }
+pub fn lasx_xvpickev_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpickev_w(a, b) }
+pub fn lasx_xvpickev_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpickev_d(a, b) }
+pub fn lasx_xvpickev_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpickod_b(a, b) }
+pub fn lasx_xvpickod_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpickod_h(a, b) }
+pub fn lasx_xvpickod_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpickod_w(a, b) }
+pub fn lasx_xvpickod_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpickod_d(a, b) }
+pub fn lasx_xvpickod_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvilvh_b(a, b) }
+pub fn lasx_xvilvh_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvilvh_h(a, b) }
+pub fn lasx_xvilvh_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvilvh_w(a, b) }
+pub fn lasx_xvilvh_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvilvh_d(a, b) }
+pub fn lasx_xvilvh_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvilvl_b(a, b) }
+pub fn lasx_xvilvl_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvilvl_h(a, b) }
+pub fn lasx_xvilvl_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvilvl_w(a, b) }
+pub fn lasx_xvilvl_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvilvl_d(a, b) }
+pub fn lasx_xvilvl_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpackev_b(a, b) }
+pub fn lasx_xvpackev_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpackev_h(a, b) }
+pub fn lasx_xvpackev_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpackev_w(a, b) }
+pub fn lasx_xvpackev_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpackev_d(a, b) }
+pub fn lasx_xvpackev_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpackod_b(a, b) }
+pub fn lasx_xvpackod_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpackod_h(a, b) }
+pub fn lasx_xvpackod_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpackod_w(a, b) }
+pub fn lasx_xvpackod_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpackod_d(a, b) }
+pub fn lasx_xvpackod_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvshuf_b(a, b, c) }
+pub fn lasx_xvshuf_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvshuf_h(a, b, c) }
+pub fn lasx_xvshuf_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 {
-    unsafe { __lasx_xvshuf_w(a, b, c) }
+pub fn lasx_xvshuf_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvshuf_d(a, b, c) }
+pub fn lasx_xvshuf_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvand_v(a, b) }
+pub fn lasx_xvand_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvand_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvandi_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvandi_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvandi_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvandi_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvor_v(a, b) }
+pub fn lasx_xvor_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvori_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvori_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvori_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvnor_v(a, b) }
+pub fn lasx_xvnor_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvnor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvnori_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvnori_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvnori_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvnori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvxor_v(a, b) }
+pub fn lasx_xvxor_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvxor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvxori_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvxori_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvxori_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvxori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitsel_v(a, b, c) }
+pub fn lasx_xvbitsel_v(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseli_b<const IMM8: u32>(a: v32u8, b: v32u8) -> v32u8 {
+pub fn lasx_xvbitseli_b<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvbitseli_b(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvbitseli_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_b<const IMM8: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvshuf4i_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_h<const IMM8: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvshuf4i_h<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_h(a, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_h(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_w<const IMM8: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvshuf4i_w<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_w(a, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_w(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_b(a: i32) -> v32i8 {
-    unsafe { __lasx_xvreplgr2vr_b(a) }
+pub fn lasx_xvreplgr2vr_b(a: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_h(a: i32) -> v16i16 {
-    unsafe { __lasx_xvreplgr2vr_h(a) }
+pub fn lasx_xvreplgr2vr_h(a: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_w(a: i32) -> v8i32 {
-    unsafe { __lasx_xvreplgr2vr_w(a) }
+pub fn lasx_xvreplgr2vr_w(a: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_d(a: i64) -> v4i64 {
-    unsafe { __lasx_xvreplgr2vr_d(a) }
+pub fn lasx_xvreplgr2vr_d(a: i64) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpcnt_b(a) }
+pub fn lasx_xvpcnt_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpcnt_h(a) }
+pub fn lasx_xvpcnt_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpcnt_w(a) }
+pub fn lasx_xvpcnt_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpcnt_d(a) }
+pub fn lasx_xvpcnt_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvclo_b(a) }
+pub fn lasx_xvclo_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvclo_h(a) }
+pub fn lasx_xvclo_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvclo_w(a) }
+pub fn lasx_xvclo_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvclo_d(a) }
+pub fn lasx_xvclo_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvclz_b(a) }
+pub fn lasx_xvclz_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvclz_h(a) }
+pub fn lasx_xvclz_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvclz_w(a) }
+pub fn lasx_xvclz_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvclz_d(a) }
+pub fn lasx_xvclz_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfadd_s(a, b) }
+pub fn lasx_xvfadd_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfadd_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfadd_d(a, b) }
+pub fn lasx_xvfadd_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfsub_s(a, b) }
+pub fn lasx_xvfsub_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfsub_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfsub_d(a, b) }
+pub fn lasx_xvfsub_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmul_s(a, b) }
+pub fn lasx_xvfmul_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmul_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmul_d(a, b) }
+pub fn lasx_xvfmul_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfdiv_s(a, b) }
+pub fn lasx_xvfdiv_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfdiv_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfdiv_d(a, b) }
+pub fn lasx_xvfdiv_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16 {
-    unsafe { __lasx_xvfcvt_h_s(a, b) }
+pub fn lasx_xvfcvt_h_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcvt_h_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32 {
-    unsafe { __lasx_xvfcvt_s_d(a, b) }
+pub fn lasx_xvfcvt_s_d(a: m256d, b: m256d) -> m256 {
+    unsafe { transmute(__lasx_xvfcvt_s_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmin_s(a, b) }
+pub fn lasx_xvfmin_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmin_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmin_d(a, b) }
+pub fn lasx_xvfmin_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmina_s(a, b) }
+pub fn lasx_xvfmina_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmina_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmina_d(a, b) }
+pub fn lasx_xvfmina_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmina_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmax_s(a, b) }
+pub fn lasx_xvfmax_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmax_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmax_d(a, b) }
+pub fn lasx_xvfmax_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmaxa_s(a, b) }
+pub fn lasx_xvfmaxa_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmaxa_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmaxa_d(a, b) }
+pub fn lasx_xvfmaxa_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmaxa_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfclass_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfclass_s(a) }
+pub fn lasx_xvfclass_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfclass_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfclass_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfclass_d(a) }
+pub fn lasx_xvfclass_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfclass_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsqrt_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfsqrt_s(a) }
+pub fn lasx_xvfsqrt_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsqrt_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfsqrt_d(a) }
+pub fn lasx_xvfsqrt_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecip_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrecip_s(a) }
+pub fn lasx_xvfrecip_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrecip_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecip_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrecip_d(a) }
+pub fn lasx_xvfrecip_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrecip_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecipe_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrecipe_s(a) }
+pub fn lasx_xvfrecipe_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrecipe_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecipe_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrecipe_d(a) }
+pub fn lasx_xvfrecipe_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrecipe_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrte_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrsqrte_s(a) }
+pub fn lasx_xvfrsqrte_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrsqrte_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrte_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrsqrte_d(a) }
+pub fn lasx_xvfrsqrte_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrsqrte_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrint_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrint_s(a) }
+pub fn lasx_xvfrint_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrint_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrint_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrint_d(a) }
+pub fn lasx_xvfrint_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrint_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrt_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrsqrt_s(a) }
+pub fn lasx_xvfrsqrt_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrt_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrsqrt_d(a) }
+pub fn lasx_xvfrsqrt_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvflogb_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvflogb_s(a) }
+pub fn lasx_xvflogb_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvflogb_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvflogb_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvflogb_d(a) }
+pub fn lasx_xvflogb_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvflogb_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvth_s_h(a: v16i16) -> v8f32 {
-    unsafe { __lasx_xvfcvth_s_h(a) }
+pub fn lasx_xvfcvth_s_h(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvfcvth_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvth_d_s(a: v8f32) -> v4f64 {
-    unsafe { __lasx_xvfcvth_d_s(a) }
+pub fn lasx_xvfcvth_d_s(a: m256) -> m256d {
+    unsafe { transmute(__lasx_xvfcvth_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvtl_s_h(a: v16i16) -> v8f32 {
-    unsafe { __lasx_xvfcvtl_s_h(a) }
+pub fn lasx_xvfcvtl_s_h(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvfcvtl_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvtl_d_s(a: v8f32) -> v4f64 {
-    unsafe { __lasx_xvfcvtl_d_s(a) }
+pub fn lasx_xvfcvtl_d_s(a: m256) -> m256d {
+    unsafe { transmute(__lasx_xvfcvtl_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftint_w_s(a) }
+pub fn lasx_xvftint_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftint_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftint_l_d(a) }
+pub fn lasx_xvftint_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftint_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_wu_s(a: v8f32) -> v8u32 {
-    unsafe { __lasx_xvftint_wu_s(a) }
+pub fn lasx_xvftint_wu_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftint_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_lu_d(a: v4f64) -> v4u64 {
-    unsafe { __lasx_xvftint_lu_d(a) }
+pub fn lasx_xvftint_lu_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftint_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrz_w_s(a) }
+pub fn lasx_xvftintrz_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrz_l_d(a) }
+pub fn lasx_xvftintrz_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_wu_s(a: v8f32) -> v8u32 {
-    unsafe { __lasx_xvftintrz_wu_s(a) }
+pub fn lasx_xvftintrz_wu_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_lu_d(a: v4f64) -> v4u64 {
-    unsafe { __lasx_xvftintrz_lu_d(a) }
+pub fn lasx_xvftintrz_lu_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_s_w(a: v8i32) -> v8f32 {
-    unsafe { __lasx_xvffint_s_w(a) }
+pub fn lasx_xvffint_s_w(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvffint_s_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_d_l(a: v4i64) -> v4f64 {
-    unsafe { __lasx_xvffint_d_l(a) }
+pub fn lasx_xvffint_d_l(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffint_d_l(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_s_wu(a: v8u32) -> v8f32 {
-    unsafe { __lasx_xvffint_s_wu(a) }
+pub fn lasx_xvffint_s_wu(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvffint_s_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_d_lu(a: v4u64) -> v4f64 {
-    unsafe { __lasx_xvffint_d_lu(a) }
+pub fn lasx_xvffint_d_lu(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffint_d_lu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8 {
-    unsafe { __lasx_xvreplve_b(a, b) }
+pub fn lasx_xvreplve_b(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16 {
-    unsafe { __lasx_xvreplve_h(a, b) }
+pub fn lasx_xvreplve_h(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32 {
-    unsafe { __lasx_xvreplve_w(a, b) }
+pub fn lasx_xvreplve_w(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64 {
-    unsafe { __lasx_xvreplve_d(a, b) }
+pub fn lasx_xvreplve_d(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpermi_w<const IMM8: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvpermi_w<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvpermi_w(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvpermi_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvandn_v(a, b) }
+pub fn lasx_xvandn_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvandn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvneg_b(a) }
+pub fn lasx_xvneg_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvneg_h(a) }
+pub fn lasx_xvneg_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvneg_w(a) }
+pub fn lasx_xvneg_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvneg_d(a) }
+pub fn lasx_xvneg_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmuh_b(a, b) }
+pub fn lasx_xvmuh_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmuh_h(a, b) }
+pub fn lasx_xvmuh_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmuh_w(a, b) }
+pub fn lasx_xvmuh_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmuh_d(a, b) }
+pub fn lasx_xvmuh_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmuh_bu(a, b) }
+pub fn lasx_xvmuh_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmuh_hu(a, b) }
+pub fn lasx_xvmuh_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmuh_wu(a, b) }
+pub fn lasx_xvmuh_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmuh_du(a, b) }
+pub fn lasx_xvmuh_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_h_b<const IMM3: u32>(a: v32i8) -> v16i16 {
+pub fn lasx_xvsllwil_h_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsllwil_h_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsllwil_h_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_w_h<const IMM4: u32>(a: v16i16) -> v8i32 {
+pub fn lasx_xvsllwil_w_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsllwil_w_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsllwil_w_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_d_w<const IMM5: u32>(a: v8i32) -> v4i64 {
+pub fn lasx_xvsllwil_d_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsllwil_d_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsllwil_d_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_hu_bu<const IMM3: u32>(a: v32u8) -> v16u16 {
+pub fn lasx_xvsllwil_hu_bu<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsllwil_hu_bu(a, IMM3) }
+    unsafe { transmute(__lasx_xvsllwil_hu_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_wu_hu<const IMM4: u32>(a: v16u16) -> v8u32 {
+pub fn lasx_xvsllwil_wu_hu<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsllwil_wu_hu(a, IMM4) }
+    unsafe { transmute(__lasx_xvsllwil_wu_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_du_wu<const IMM5: u32>(a: v8u32) -> v4u64 {
+pub fn lasx_xvsllwil_du_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsllwil_du_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsllwil_du_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsran_b_h(a, b) }
+pub fn lasx_xvsran_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsran_h_w(a, b) }
+pub fn lasx_xvsran_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsran_w_d(a, b) }
+pub fn lasx_xvsran_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssran_b_h(a, b) }
+pub fn lasx_xvssran_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssran_h_w(a, b) }
+pub fn lasx_xvssran_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssran_w_d(a, b) }
+pub fn lasx_xvssran_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssran_bu_h(a, b) }
+pub fn lasx_xvssran_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssran_hu_w(a, b) }
+pub fn lasx_xvssran_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssran_wu_d(a, b) }
+pub fn lasx_xvssran_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsrarn_b_h(a, b) }
+pub fn lasx_xvsrarn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsrarn_h_w(a, b) }
+pub fn lasx_xvsrarn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsrarn_w_d(a, b) }
+pub fn lasx_xvsrarn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssrarn_b_h(a, b) }
+pub fn lasx_xvssrarn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssrarn_h_w(a, b) }
+pub fn lasx_xvssrarn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssrarn_w_d(a, b) }
+pub fn lasx_xvssrarn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssrarn_bu_h(a, b) }
+pub fn lasx_xvssrarn_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssrarn_hu_w(a, b) }
+pub fn lasx_xvssrarn_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssrarn_wu_d(a, b) }
+pub fn lasx_xvssrarn_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsrln_b_h(a, b) }
+pub fn lasx_xvsrln_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsrln_h_w(a, b) }
+pub fn lasx_xvsrln_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsrln_w_d(a, b) }
+pub fn lasx_xvsrln_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssrln_bu_h(a, b) }
+pub fn lasx_xvssrln_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssrln_hu_w(a, b) }
+pub fn lasx_xvssrln_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssrln_wu_d(a, b) }
+pub fn lasx_xvssrln_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsrlrn_b_h(a, b) }
+pub fn lasx_xvsrlrn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsrlrn_h_w(a, b) }
+pub fn lasx_xvsrlrn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsrlrn_w_d(a, b) }
+pub fn lasx_xvsrlrn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssrlrn_bu_h(a, b) }
+pub fn lasx_xvssrlrn_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssrlrn_hu_w(a, b) }
+pub fn lasx_xvssrlrn_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssrlrn_wu_d(a, b) }
+pub fn lasx_xvssrlrn_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstpi_b<const IMM5: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvfrstpi_b<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvfrstpi_b(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvfrstpi_b(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstpi_h<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvfrstpi_h<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvfrstpi_h(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvfrstpi_h(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvfrstp_b(a, b, c) }
+pub fn lasx_xvfrstp_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvfrstp_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvfrstp_h(a, b, c) }
+pub fn lasx_xvfrstp_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvfrstp_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_d<const IMM8: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvshuf4i_d<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_d(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbsrl_v<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvbsrl_v<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbsrl_v(a, IMM5) }
+    unsafe { transmute(__lasx_xvbsrl_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbsll_v<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvbsll_v<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbsll_v(a, IMM5) }
+    unsafe { transmute(__lasx_xvbsll_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_b<const IMM8: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvextrins_b<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_b(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_h<const IMM8: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvextrins_h<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_h(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_h(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_w<const IMM8: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvextrins_w<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_w(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_d<const IMM8: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvextrins_d<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_d(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmskltz_b(a) }
+pub fn lasx_xvmskltz_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmskltz_h(a) }
+pub fn lasx_xvmskltz_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmskltz_w(a) }
+pub fn lasx_xvmskltz_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmskltz_d(a) }
+pub fn lasx_xvmskltz_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsigncov_b(a, b) }
+pub fn lasx_xvsigncov_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsigncov_h(a, b) }
+pub fn lasx_xvsigncov_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsigncov_w(a, b) }
+pub fn lasx_xvsigncov_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsigncov_d(a, b) }
+pub fn lasx_xvsigncov_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmadd_s(a, b, c) }
+pub fn lasx_xvfmadd_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmadd_d(a, b, c) }
+pub fn lasx_xvfmadd_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmsub_s(a, b, c) }
+pub fn lasx_xvfmsub_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmsub_d(a, b, c) }
+pub fn lasx_xvfmsub_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfnmadd_s(a, b, c) }
+pub fn lasx_xvfnmadd_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfnmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfnmadd_d(a, b, c) }
+pub fn lasx_xvfnmadd_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfnmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfnmsub_s(a, b, c) }
+pub fn lasx_xvfnmsub_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfnmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfnmsub_d(a, b, c) }
+pub fn lasx_xvfnmsub_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfnmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrne_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrne_w_s(a) }
+pub fn lasx_xvftintrne_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrne_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrne_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrne_l_d(a) }
+pub fn lasx_xvftintrne_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrne_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrp_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrp_w_s(a) }
+pub fn lasx_xvftintrp_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrp_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrp_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrp_l_d(a) }
+pub fn lasx_xvftintrp_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrp_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrm_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrm_w_s(a) }
+pub fn lasx_xvftintrm_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrm_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrm_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrm_l_d(a) }
+pub fn lasx_xvftintrm_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrm_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftint_w_d(a, b) }
+pub fn lasx_xvftint_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftint_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32 {
-    unsafe { __lasx_xvffint_s_l(a, b) }
+pub fn lasx_xvffint_s_l(a: m256i, b: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvffint_s_l(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrz_w_d(a, b) }
+pub fn lasx_xvftintrz_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrp_w_d(a, b) }
+pub fn lasx_xvftintrp_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrp_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrm_w_d(a, b) }
+pub fn lasx_xvftintrm_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrm_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrne_w_d(a, b) }
+pub fn lasx_xvftintrne_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrne_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftinth_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftinth_l_s(a) }
+pub fn lasx_xvftinth_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftinth_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintl_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintl_l_s(a) }
+pub fn lasx_xvftintl_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffinth_d_w(a: v8i32) -> v4f64 {
-    unsafe { __lasx_xvffinth_d_w(a) }
+pub fn lasx_xvffinth_d_w(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffinth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffintl_d_w(a: v8i32) -> v4f64 {
-    unsafe { __lasx_xvffintl_d_w(a) }
+pub fn lasx_xvffintl_d_w(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffintl_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrzh_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrzh_l_s(a) }
+pub fn lasx_xvftintrzh_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrzh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrzl_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrzl_l_s(a) }
+pub fn lasx_xvftintrzl_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrzl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrph_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrph_l_s(a) }
+pub fn lasx_xvftintrph_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrph_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrpl_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrpl_l_s(a) }
+pub fn lasx_xvftintrpl_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrpl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrmh_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrmh_l_s(a) }
+pub fn lasx_xvftintrmh_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrmh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrml_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrml_l_s(a) }
+pub fn lasx_xvftintrml_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrml_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrneh_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrneh_l_s(a) }
+pub fn lasx_xvftintrneh_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrneh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrnel_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrnel_l_s(a) }
+pub fn lasx_xvftintrnel_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrnel_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrne_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrne_s(a) }
+pub fn lasx_xvfrintrne_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrne_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrne_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrne_d(a) }
+pub fn lasx_xvfrintrne_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrne_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrz_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrz_s(a) }
+pub fn lasx_xvfrintrz_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrz_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrz_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrz_d(a) }
+pub fn lasx_xvfrintrz_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrp_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrp_s(a) }
+pub fn lasx_xvfrintrp_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrp_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrp_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrp_d(a) }
+pub fn lasx_xvfrintrp_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrp_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrm_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrm_s(a) }
+pub fn lasx_xvfrintrm_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrm_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrm_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrm_d(a) }
+pub fn lasx_xvfrintrm_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrm_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvld<const IMM_S12: i32>(mem_addr: *const i8) -> v32i8 {
+pub unsafe fn lasx_xvld<const IMM_S12: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lasx_xvld(mem_addr, IMM_S12)
+    transmute(__lasx_xvld(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvst<const IMM_S12: i32>(a: v32i8, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvst<const IMM_S12: i32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lasx_xvst(a, mem_addr, IMM_S12)
+    transmute(__lasx_xvst(transmute(a), mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_b<const IMM_S8: i32, const IMM4: u32>(a: v32i8, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_b<const IMM_S8: i32, const IMM4: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM4, 4);
-    __lasx_xvstelm_b(a, mem_addr, IMM_S8, IMM4)
+    transmute(__lasx_xvstelm_b(transmute(a), mem_addr, IMM_S8, IMM4))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_h<const IMM_S8: i32, const IMM3: u32>(a: v16i16, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_h<const IMM_S8: i32, const IMM3: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM3, 3);
-    __lasx_xvstelm_h(a, mem_addr, IMM_S8, IMM3)
+    transmute(__lasx_xvstelm_h(transmute(a), mem_addr, IMM_S8, IMM3))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_w<const IMM_S8: i32, const IMM2: u32>(a: v8i32, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_w<const IMM_S8: i32, const IMM2: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM2, 2);
-    __lasx_xvstelm_w(a, mem_addr, IMM_S8, IMM2)
+    transmute(__lasx_xvstelm_w(transmute(a), mem_addr, IMM_S8, IMM2))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_d<const IMM_S8: i32, const IMM1: u32>(a: v4i64, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_d<const IMM_S8: i32, const IMM1: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM1, 1);
-    __lasx_xvstelm_d(a, mem_addr, IMM_S8, IMM1)
+    transmute(__lasx_xvstelm_d(transmute(a), mem_addr, IMM_S8, IMM1))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsve0_w<const IMM3: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvinsve0_w<const IMM3: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvinsve0_w(a, b, IMM3) }
+    unsafe { transmute(__lasx_xvinsve0_w(transmute(a), transmute(b), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsve0_d<const IMM2: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvinsve0_d<const IMM2: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvinsve0_d(a, b, IMM2) }
+    unsafe { transmute(__lasx_xvinsve0_d(transmute(a), transmute(b), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_w<const IMM3: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvpickve_w<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve_w(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve_w(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_d<const IMM2: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvpickve_d<const IMM2: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve_d(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve_d(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssrlrn_b_h(a, b) }
+pub fn lasx_xvssrlrn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssrlrn_h_w(a, b) }
+pub fn lasx_xvssrlrn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssrlrn_w_d(a, b) }
+pub fn lasx_xvssrlrn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssrln_b_h(a, b) }
+pub fn lasx_xvssrln_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssrln_h_w(a, b) }
+pub fn lasx_xvssrln_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssrln_w_d(a, b) }
+pub fn lasx_xvssrln_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvorn_v(a, b) }
+pub fn lasx_xvorn_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvorn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvldi<const IMM_S13: i32>() -> v4i64 {
+pub fn lasx_xvldi<const IMM_S13: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S13, 13);
-    unsafe { __lasx_xvldi(IMM_S13) }
+    unsafe { transmute(__lasx_xvldi(IMM_S13)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> v32i8 {
-    __lasx_xvldx(mem_addr, b)
+pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> m256i {
+    transmute(__lasx_xvldx(mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstx(a: v32i8, mem_addr: *mut i8, b: i64) {
-    __lasx_xvstx(a, mem_addr, b)
+pub unsafe fn lasx_xvstx(a: m256i, mem_addr: *mut i8, b: i64) {
+    transmute(__lasx_xvstx(transmute(a), mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextl_qu_du(a: v4u64) -> v4u64 {
-    unsafe { __lasx_xvextl_qu_du(a) }
+pub fn lasx_xvextl_qu_du(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvextl_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsgr2vr_w<const IMM3: u32>(a: v8i32, b: i32) -> v8i32 {
+pub fn lasx_xvinsgr2vr_w<const IMM3: u32>(a: m256i, b: i32) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvinsgr2vr_w(a, b, IMM3) }
+    unsafe { transmute(__lasx_xvinsgr2vr_w(transmute(a), transmute(b), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsgr2vr_d<const IMM2: u32>(a: v4i64, b: i64) -> v4i64 {
+pub fn lasx_xvinsgr2vr_d<const IMM2: u32>(a: m256i, b: i64) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvinsgr2vr_d(a, b, IMM2) }
+    unsafe { transmute(__lasx_xvinsgr2vr_d(transmute(a), transmute(b), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvreplve0_b(a) }
+pub fn lasx_xvreplve0_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvreplve0_h(a) }
+pub fn lasx_xvreplve0_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvreplve0_w(a) }
+pub fn lasx_xvreplve0_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvreplve0_d(a) }
+pub fn lasx_xvreplve0_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_q(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvreplve0_q(a) }
+pub fn lasx_xvreplve0_q(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_q(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_h_b(a: v32i8) -> v16i16 {
-    unsafe { __lasx_vext2xv_h_b(a) }
+pub fn lasx_vext2xv_h_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_h_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_w_h(a: v16i16) -> v8i32 {
-    unsafe { __lasx_vext2xv_w_h(a) }
+pub fn lasx_vext2xv_w_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_w_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_d_w(a: v8i32) -> v4i64 {
-    unsafe { __lasx_vext2xv_d_w(a) }
+pub fn lasx_vext2xv_d_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_w_b(a: v32i8) -> v8i32 {
-    unsafe { __lasx_vext2xv_w_b(a) }
+pub fn lasx_vext2xv_w_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_w_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_d_h(a: v16i16) -> v4i64 {
-    unsafe { __lasx_vext2xv_d_h(a) }
+pub fn lasx_vext2xv_d_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_d_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_d_b(a: v32i8) -> v4i64 {
-    unsafe { __lasx_vext2xv_d_b(a) }
+pub fn lasx_vext2xv_d_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_d_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_hu_bu(a: v32i8) -> v16i16 {
-    unsafe { __lasx_vext2xv_hu_bu(a) }
+pub fn lasx_vext2xv_hu_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_hu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_wu_hu(a: v16i16) -> v8i32 {
-    unsafe { __lasx_vext2xv_wu_hu(a) }
+pub fn lasx_vext2xv_wu_hu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_wu_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_du_wu(a: v8i32) -> v4i64 {
-    unsafe { __lasx_vext2xv_du_wu(a) }
+pub fn lasx_vext2xv_du_wu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_du_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_wu_bu(a: v32i8) -> v8i32 {
-    unsafe { __lasx_vext2xv_wu_bu(a) }
+pub fn lasx_vext2xv_wu_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_wu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_du_hu(a: v16i16) -> v4i64 {
-    unsafe { __lasx_vext2xv_du_hu(a) }
+pub fn lasx_vext2xv_du_hu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_du_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_du_bu(a: v32i8) -> v4i64 {
-    unsafe { __lasx_vext2xv_du_bu(a) }
+pub fn lasx_vext2xv_du_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_du_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpermi_q<const IMM8: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvpermi_q<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvpermi_q(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvpermi_q(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpermi_d<const IMM8: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvpermi_d<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvpermi_d(a, IMM8) }
+    unsafe { transmute(__lasx_xvpermi_d(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvperm_w(a, b) }
+pub fn lasx_xvperm_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvperm_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> v32i8 {
+pub unsafe fn lasx_xvldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lasx_xvldrepl_b(mem_addr, IMM_S12)
+    transmute(__lasx_xvldrepl_b(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> v16i16 {
+pub unsafe fn lasx_xvldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S11, 11);
-    __lasx_xvldrepl_h(mem_addr, IMM_S11)
+    transmute(__lasx_xvldrepl_h(mem_addr, IMM_S11))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> v8i32 {
+pub unsafe fn lasx_xvldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    __lasx_xvldrepl_w(mem_addr, IMM_S10)
+    transmute(__lasx_xvldrepl_w(mem_addr, IMM_S10))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> v4i64 {
+pub unsafe fn lasx_xvldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S9, 9);
-    __lasx_xvldrepl_d(mem_addr, IMM_S9)
+    transmute(__lasx_xvldrepl_d(mem_addr, IMM_S9))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_w<const IMM3: u32>(a: v8i32) -> i32 {
+pub fn lasx_xvpickve2gr_w<const IMM3: u32>(a: m256i) -> i32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve2gr_w(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve2gr_w(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_wu<const IMM3: u32>(a: v8i32) -> u32 {
+pub fn lasx_xvpickve2gr_wu<const IMM3: u32>(a: m256i) -> u32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve2gr_wu(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve2gr_wu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_d<const IMM2: u32>(a: v4i64) -> i64 {
+pub fn lasx_xvpickve2gr_d<const IMM2: u32>(a: m256i) -> i64 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve2gr_d(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve2gr_d(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_du<const IMM2: u32>(a: v4i64) -> u64 {
+pub fn lasx_xvpickve2gr_du<const IMM2: u32>(a: m256i) -> u64 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve2gr_du(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve2gr_du(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwev_q_d(a, b) }
+pub fn lasx_xvaddwev_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwev_d_w(a, b) }
+pub fn lasx_xvaddwev_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwev_w_h(a, b) }
+pub fn lasx_xvaddwev_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwev_h_b(a, b) }
+pub fn lasx_xvaddwev_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvaddwev_q_du(a, b) }
+pub fn lasx_xvaddwev_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvaddwev_d_wu(a, b) }
+pub fn lasx_xvaddwev_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvaddwev_w_hu(a, b) }
+pub fn lasx_xvaddwev_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvaddwev_h_bu(a, b) }
+pub fn lasx_xvaddwev_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsubwev_q_d(a, b) }
+pub fn lasx_xvsubwev_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvsubwev_d_w(a, b) }
+pub fn lasx_xvsubwev_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvsubwev_w_h(a, b) }
+pub fn lasx_xvsubwev_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvsubwev_h_b(a, b) }
+pub fn lasx_xvsubwev_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvsubwev_q_du(a, b) }
+pub fn lasx_xvsubwev_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvsubwev_d_wu(a, b) }
+pub fn lasx_xvsubwev_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvsubwev_w_hu(a, b) }
+pub fn lasx_xvsubwev_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvsubwev_h_bu(a, b) }
+pub fn lasx_xvsubwev_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwev_q_d(a, b) }
+pub fn lasx_xvmulwev_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwev_d_w(a, b) }
+pub fn lasx_xvmulwev_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwev_w_h(a, b) }
+pub fn lasx_xvmulwev_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwev_h_b(a, b) }
+pub fn lasx_xvmulwev_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvmulwev_q_du(a, b) }
+pub fn lasx_xvmulwev_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvmulwev_d_wu(a, b) }
+pub fn lasx_xvmulwev_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvmulwev_w_hu(a, b) }
+pub fn lasx_xvmulwev_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvmulwev_h_bu(a, b) }
+pub fn lasx_xvmulwev_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwod_q_d(a, b) }
+pub fn lasx_xvaddwod_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwod_d_w(a, b) }
+pub fn lasx_xvaddwod_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwod_w_h(a, b) }
+pub fn lasx_xvaddwod_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwod_h_b(a, b) }
+pub fn lasx_xvaddwod_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvaddwod_q_du(a, b) }
+pub fn lasx_xvaddwod_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvaddwod_d_wu(a, b) }
+pub fn lasx_xvaddwod_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvaddwod_w_hu(a, b) }
+pub fn lasx_xvaddwod_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvaddwod_h_bu(a, b) }
+pub fn lasx_xvaddwod_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsubwod_q_d(a, b) }
+pub fn lasx_xvsubwod_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvsubwod_d_w(a, b) }
+pub fn lasx_xvsubwod_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvsubwod_w_h(a, b) }
+pub fn lasx_xvsubwod_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvsubwod_h_b(a, b) }
+pub fn lasx_xvsubwod_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvsubwod_q_du(a, b) }
+pub fn lasx_xvsubwod_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvsubwod_d_wu(a, b) }
+pub fn lasx_xvsubwod_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvsubwod_w_hu(a, b) }
+pub fn lasx_xvsubwod_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvsubwod_h_bu(a, b) }
+pub fn lasx_xvsubwod_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwod_q_d(a, b) }
+pub fn lasx_xvmulwod_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwod_d_w(a, b) }
+pub fn lasx_xvmulwod_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwod_w_h(a, b) }
+pub fn lasx_xvmulwod_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwod_h_b(a, b) }
+pub fn lasx_xvmulwod_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvmulwod_q_du(a, b) }
+pub fn lasx_xvmulwod_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvmulwod_d_wu(a, b) }
+pub fn lasx_xvmulwod_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvmulwod_w_hu(a, b) }
+pub fn lasx_xvmulwod_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvmulwod_h_bu(a, b) }
+pub fn lasx_xvmulwod_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwev_d_wu_w(a, b) }
+pub fn lasx_xvaddwev_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwev_w_hu_h(a, b) }
+pub fn lasx_xvaddwev_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwev_h_bu_b(a, b) }
+pub fn lasx_xvaddwev_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwev_d_wu_w(a, b) }
+pub fn lasx_xvmulwev_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwev_w_hu_h(a, b) }
+pub fn lasx_xvmulwev_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwev_h_bu_b(a, b) }
+pub fn lasx_xvmulwev_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwod_d_wu_w(a, b) }
+pub fn lasx_xvaddwod_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwod_w_hu_h(a, b) }
+pub fn lasx_xvaddwod_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwod_h_bu_b(a, b) }
+pub fn lasx_xvaddwod_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwod_d_wu_w(a, b) }
+pub fn lasx_xvmulwod_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwod_w_hu_h(a, b) }
+pub fn lasx_xvmulwod_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwod_h_bu_b(a, b) }
+pub fn lasx_xvmulwod_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvhaddw_q_d(a, b) }
+pub fn lasx_xvhaddw_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvhaddw_qu_du(a, b) }
+pub fn lasx_xvhaddw_qu_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvhsubw_q_d(a, b) }
+pub fn lasx_xvhsubw_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvhsubw_qu_du(a, b) }
+pub fn lasx_xvhsubw_qu_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_q_d(a, b, c) }
+pub fn lasx_xvmaddwev_q_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_d_w(a, b, c) }
+pub fn lasx_xvmaddwev_d_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwev_w_h(a, b, c) }
+pub fn lasx_xvmaddwev_w_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwev_h_b(a, b, c) }
+pub fn lasx_xvmaddwev_h_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmaddwev_q_du(a, b, c) }
+pub fn lasx_xvmaddwev_q_du(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 {
-    unsafe { __lasx_xvmaddwev_d_wu(a, b, c) }
+pub fn lasx_xvmaddwev_d_wu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 {
-    unsafe { __lasx_xvmaddwev_w_hu(a, b, c) }
+pub fn lasx_xvmaddwev_w_hu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 {
-    unsafe { __lasx_xvmaddwev_h_bu(a, b, c) }
+pub fn lasx_xvmaddwev_h_bu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_q_d(a, b, c) }
+pub fn lasx_xvmaddwod_q_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_d_w(a, b, c) }
+pub fn lasx_xvmaddwod_d_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwod_w_h(a, b, c) }
+pub fn lasx_xvmaddwod_w_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwod_h_b(a, b, c) }
+pub fn lasx_xvmaddwod_h_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmaddwod_q_du(a, b, c) }
+pub fn lasx_xvmaddwod_q_du(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 {
-    unsafe { __lasx_xvmaddwod_d_wu(a, b, c) }
+pub fn lasx_xvmaddwod_d_wu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 {
-    unsafe { __lasx_xvmaddwod_w_hu(a, b, c) }
+pub fn lasx_xvmaddwod_w_hu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 {
-    unsafe { __lasx_xvmaddwod_h_bu(a, b, c) }
+pub fn lasx_xvmaddwod_h_bu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_q_du_d(a, b, c) }
+pub fn lasx_xvmaddwev_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_d_wu_w(a, b, c) }
+pub fn lasx_xvmaddwev_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwev_w_hu_h(a, b, c) }
+pub fn lasx_xvmaddwev_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwev_h_bu_b(a, b, c) }
+pub fn lasx_xvmaddwev_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_q_du_d(a, b, c) }
+pub fn lasx_xvmaddwod_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_d_wu_w(a, b, c) }
+pub fn lasx_xvmaddwod_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwod_w_hu_h(a, b, c) }
+pub fn lasx_xvmaddwod_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwod_h_bu_b(a, b, c) }
+pub fn lasx_xvmaddwod_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvrotr_b(a, b) }
+pub fn lasx_xvrotr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvrotr_h(a, b) }
+pub fn lasx_xvrotr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvrotr_w(a, b) }
+pub fn lasx_xvrotr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvrotr_d(a, b) }
+pub fn lasx_xvrotr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvadd_q(a, b) }
+pub fn lasx_xvadd_q(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsub_q(a, b) }
+pub fn lasx_xvsub_q(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwev_q_du_d(a, b) }
+pub fn lasx_xvaddwev_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwod_q_du_d(a, b) }
+pub fn lasx_xvaddwod_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwev_q_du_d(a, b) }
+pub fn lasx_xvmulwev_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwod_q_du_d(a, b) }
+pub fn lasx_xvmulwod_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskgez_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmskgez_b(a) }
+pub fn lasx_xvmskgez_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskgez_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsknz_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmsknz_b(a) }
+pub fn lasx_xvmsknz_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsknz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_h_b(a: v32i8) -> v16i16 {
-    unsafe { __lasx_xvexth_h_b(a) }
+pub fn lasx_xvexth_h_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_h_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_w_h(a: v16i16) -> v8i32 {
-    unsafe { __lasx_xvexth_w_h(a) }
+pub fn lasx_xvexth_w_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_w_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_d_w(a: v8i32) -> v4i64 {
-    unsafe { __lasx_xvexth_d_w(a) }
+pub fn lasx_xvexth_d_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_q_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvexth_q_d(a) }
+pub fn lasx_xvexth_q_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_hu_bu(a: v32u8) -> v16u16 {
-    unsafe { __lasx_xvexth_hu_bu(a) }
+pub fn lasx_xvexth_hu_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_hu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_wu_hu(a: v16u16) -> v8u32 {
-    unsafe { __lasx_xvexth_wu_hu(a) }
+pub fn lasx_xvexth_wu_hu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_wu_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_du_wu(a: v8u32) -> v4u64 {
-    unsafe { __lasx_xvexth_du_wu(a) }
+pub fn lasx_xvexth_du_wu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_du_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_qu_du(a: v4u64) -> v4u64 {
-    unsafe { __lasx_xvexth_qu_du(a) }
+pub fn lasx_xvexth_qu_du(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvrotri_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvrotri_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvrotri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvrotri_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvrotri_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvrotri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvrotri_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvrotri_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvrotri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvrotri_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvrotri_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvrotri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextl_q_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvextl_q_d(a) }
+pub fn lasx_xvextl_q_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvextl_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrlni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrlni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrlni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrlni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrlrni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrlrni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrlrni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrlrni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrlni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrlni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrlni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrlni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrlni_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrlni_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrlni_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrlni_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrlrni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrlrni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrlrni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrlrni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrlrni_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlrni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlrni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrlrni_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlrni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlrni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrlrni_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlrni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlrni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrlrni_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlrni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlrni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrani_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrani_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrani_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrani_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrarni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrarni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrarni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrarni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrani_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrani_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrani_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrani_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrani_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrani_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrani_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrani_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrani_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrani_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrani_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrani_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrani_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrani_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrani_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrani_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrarni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrarni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrarni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrarni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrarni_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrarni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrarni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrarni_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrarni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrarni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrarni_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrarni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrarni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrarni_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrarni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrarni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_b(a: v32u8) -> i32 {
-    unsafe { __lasx_xbnz_b(a) }
+pub fn lasx_xbnz_b(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_d(a: v4u64) -> i32 {
-    unsafe { __lasx_xbnz_d(a) }
+pub fn lasx_xbnz_d(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_h(a: v16u16) -> i32 {
-    unsafe { __lasx_xbnz_h(a) }
+pub fn lasx_xbnz_h(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_v(a: v32u8) -> i32 {
-    unsafe { __lasx_xbnz_v(a) }
+pub fn lasx_xbnz_v(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_w(a: v8u32) -> i32 {
-    unsafe { __lasx_xbnz_w(a) }
+pub fn lasx_xbnz_w(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_b(a: v32u8) -> i32 {
-    unsafe { __lasx_xbz_b(a) }
+pub fn lasx_xbz_b(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_d(a: v4u64) -> i32 {
-    unsafe { __lasx_xbz_d(a) }
+pub fn lasx_xbz_d(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_h(a: v16u16) -> i32 {
-    unsafe { __lasx_xbz_h(a) }
+pub fn lasx_xbz_h(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_v(a: v32u8) -> i32 {
-    unsafe { __lasx_xbz_v(a) }
+pub fn lasx_xbz_v(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_w(a: v8u32) -> i32 {
-    unsafe { __lasx_xbz_w(a) }
+pub fn lasx_xbz_w(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_caf_d(a, b) }
+pub fn lasx_xvfcmp_caf_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_caf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_caf_s(a, b) }
+pub fn lasx_xvfcmp_caf_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_caf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_ceq_d(a, b) }
+pub fn lasx_xvfcmp_ceq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_ceq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_ceq_s(a, b) }
+pub fn lasx_xvfcmp_ceq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_ceq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cle_d(a, b) }
+pub fn lasx_xvfcmp_cle_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cle_s(a, b) }
+pub fn lasx_xvfcmp_cle_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_clt_d(a, b) }
+pub fn lasx_xvfcmp_clt_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_clt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_clt_s(a, b) }
+pub fn lasx_xvfcmp_clt_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_clt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cne_d(a, b) }
+pub fn lasx_xvfcmp_cne_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cne_s(a, b) }
+pub fn lasx_xvfcmp_cne_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cor_d(a, b) }
+pub fn lasx_xvfcmp_cor_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cor_s(a, b) }
+pub fn lasx_xvfcmp_cor_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cueq_d(a, b) }
+pub fn lasx_xvfcmp_cueq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cueq_s(a, b) }
+pub fn lasx_xvfcmp_cueq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cule_d(a, b) }
+pub fn lasx_xvfcmp_cule_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cule_s(a, b) }
+pub fn lasx_xvfcmp_cule_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cult_d(a, b) }
+pub fn lasx_xvfcmp_cult_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cult_s(a, b) }
+pub fn lasx_xvfcmp_cult_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cun_d(a, b) }
+pub fn lasx_xvfcmp_cun_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cune_d(a, b) }
+pub fn lasx_xvfcmp_cune_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cune_s(a, b) }
+pub fn lasx_xvfcmp_cune_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cun_s(a, b) }
+pub fn lasx_xvfcmp_cun_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_saf_d(a, b) }
+pub fn lasx_xvfcmp_saf_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_saf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_saf_s(a, b) }
+pub fn lasx_xvfcmp_saf_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_saf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_seq_d(a, b) }
+pub fn lasx_xvfcmp_seq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_seq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_seq_s(a, b) }
+pub fn lasx_xvfcmp_seq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_seq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sle_d(a, b) }
+pub fn lasx_xvfcmp_sle_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sle_s(a, b) }
+pub fn lasx_xvfcmp_sle_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_slt_d(a, b) }
+pub fn lasx_xvfcmp_slt_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_slt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_slt_s(a, b) }
+pub fn lasx_xvfcmp_slt_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_slt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sne_d(a, b) }
+pub fn lasx_xvfcmp_sne_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sne_s(a, b) }
+pub fn lasx_xvfcmp_sne_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sor_d(a, b) }
+pub fn lasx_xvfcmp_sor_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sor_s(a, b) }
+pub fn lasx_xvfcmp_sor_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sueq_d(a, b) }
+pub fn lasx_xvfcmp_sueq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sueq_s(a, b) }
+pub fn lasx_xvfcmp_sueq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sule_d(a, b) }
+pub fn lasx_xvfcmp_sule_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sule_s(a, b) }
+pub fn lasx_xvfcmp_sule_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sult_d(a, b) }
+pub fn lasx_xvfcmp_sult_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sult_s(a, b) }
+pub fn lasx_xvfcmp_sult_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sun_d(a, b) }
+pub fn lasx_xvfcmp_sun_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sune_d(a, b) }
+pub fn lasx_xvfcmp_sune_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sune_s(a, b) }
+pub fn lasx_xvfcmp_sune_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sun_s(a, b) }
+pub fn lasx_xvfcmp_sun_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_d_f<const IMM2: u32>(a: v4f64) -> v4f64 {
+pub fn lasx_xvpickve_d_f<const IMM2: u32>(a: m256d) -> m256d {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve_d_f(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve_d_f(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_w_f<const IMM3: u32>(a: v8f32) -> v8f32 {
+pub fn lasx_xvpickve_w_f<const IMM3: u32>(a: m256) -> m256 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve_w_f(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve_w_f(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_b<const IMM_S10: i32>() -> v32i8 {
+pub fn lasx_xvrepli_b<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_b(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_b(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_d<const IMM_S10: i32>() -> v4i64 {
+pub fn lasx_xvrepli_d<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_d(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_d(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_h<const IMM_S10: i32>() -> v16i16 {
+pub fn lasx_xvrepli_h<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_h(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_h(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_w<const IMM_S10: i32>() -> v8i32 {
+pub fn lasx_xvrepli_w<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_w(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_w(IMM_S10)) }
 }
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs
index 9611517e637..a8ceede8739 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs
@@ -1,33 +1,140 @@
 types! {
     #![unstable(feature = "stdarch_loongarch", issue = "117427")]
 
-    /// LOONGARCH-specific 256-bit wide vector of 32 packed `i8`.
-    pub struct v32i8(32 x pub(crate) i8);
+    /// 256-bit wide integer vector type, LoongArch-specific
+    ///
+    /// This type is the same as the `__m256i` type defined in `lasxintrin.h`,
+    /// representing a 256-bit SIMD register. Usage of this type typically
+    /// occurs in conjunction with the `lasx` target features for LoongArch.
+    ///
+    /// Internally this type may be viewed as:
+    ///
+    /// * `i8x32` - thirty two `i8` values packed together
+    /// * `i16x16` - sixteen `i16` values packed together
+    /// * `i32x8` - eight `i32` values packed together
+    /// * `i64x4` - four `i64` values packed together
+    ///
+    /// (as well as unsigned versions). Each intrinsic may interpret the
+    /// internal bits differently, check the documentation of the intrinsic
+    /// to see how it's being used.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Note that this means that an instance of `m256i` typically just means
+    /// a "bag of bits" which is left up to interpretation at the point of use.
+    ///
+    /// Most intrinsics using `m256i` are prefixed with `lasx_` and the integer
+    /// types tend to correspond to suffixes like "b", "h", "w" or "d".
+    pub struct m256i(4 x i64);
 
-    /// LOONGARCH-specific 256-bit wide vector of 16 packed `i16`.
-    pub struct v16i16(16 x pub(crate) i16);
+    /// 256-bit wide set of eight `f32` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m256` type defined in `lasxintrin.h`,
+    /// representing a 256-bit SIMD register which internally consists of
+    /// eight packed `f32` instances. Usage of this type typically occurs in
+    /// conjunction with the `lasx` target features for LoongArch.
+    ///
+    /// Note that unlike `m256i`, the integer version of the 256-bit registers,
+    /// this `m256` type has *one* interpretation. Each instance of `m256`
+    /// always corresponds to `f32x8`, or eight `f32` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding between two consecutive elements); however, the
+    /// alignment is different and equal to the size of the type. Note that the
+    /// ABI for function calls may *not* be the same.
+    ///
+    /// Most intrinsics using `m256` are prefixed with `lasx_` and are
+    /// suffixed with "s".
+    pub struct m256(8 x f32);
 
-    /// LOONGARCH-specific 256-bit wide vector of 8 packed `i32`.
-    pub struct v8i32(8 x pub(crate) i32);
+    /// 256-bit wide set of four `f64` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m256d` type defined in `lasxintrin.h`,
+    /// representing a 256-bit SIMD register which internally consists of
+    /// four packed `f64` instances. Usage of this type typically occurs in
+    /// conjunction with the `lasx` target feature for LoongArch.
+    ///
+    /// Note that unlike `m256i`, the integer version of the 256-bit registers,
+    /// this `m256d` type has *one* interpretation. Each instance of `m256d`
+    /// always corresponds to `f64x4`, or four `f64` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Most intrinsics using `m256d` are prefixed with `lasx_` and are suffixed
+    /// with "d", a suffix also shared by the 64-bit integer intrinsics on `m256i`.
+    pub struct m256d(4 x f64);
 
-    /// LOONGARCH-specific 256-bit wide vector of 4 packed `i64`.
-    pub struct v4i64(4 x pub(crate) i64);
-
-    /// LOONGARCH-specific 256-bit wide vector of 32 packed `u8`.
-    pub struct v32u8(32 x pub(crate) u8);
-
-    /// LOONGARCH-specific 256-bit wide vector of 16 packed `u16`.
-    pub struct v16u16(16 x pub(crate) u16);
-
-    /// LOONGARCH-specific 256-bit wide vector of 8 packed `u32`.
-    pub struct v8u32(8 x pub(crate) u32);
-
-    /// LOONGARCH-specific 256-bit wide vector of 4 packed `u64`.
-    pub struct v4u64(4 x pub(crate) u64);
+}
 
-    /// LOONGARCH-specific 128-bit wide vector of 8 packed `f32`.
-    pub struct v8f32(8 x pub(crate) f32);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v32i8([i8; 32]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16i16([i16; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8i32([i32; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4i64([i64; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v32u8([u8; 32]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16u16([u16; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8u32([u32; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4u64([u64; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8f32([f32; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4f64([f64; 4]);
 
-    /// LOONGARCH-specific 256-bit wide vector of 4 packed `f64`.
-    pub struct v4f64(4 x pub(crate) f64);
-}
+// These type aliases are provided solely for transitional compatibility.
+// They are temporary and will be removed when appropriate.
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v32i8 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16i16 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8i32 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4i64 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v32u8 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16u16 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8u32 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4u64 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8f32 = m256;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4f64 = m256d;
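With the public aliases above now all resolving to the unified `m256i`/`m256`/`m256d` types, the generated wrappers convert between the unified type and the element-typed `__v*` vectors at the FFI boundary (hence the new `use crate::mem::transmute;` import). The following is only a minimal sketch of that pattern, not the generated code verbatim, using `lasx_xvsll_b` as a stand-in for any generated LASX intrinsic:

#[inline]
#[target_feature(enable = "lasx")]
pub unsafe fn lasx_xvsll_b(a: m256i, b: m256i) -> m256i {
    // Reinterpret the unified 256-bit type as the element-typed vector the
    // LLVM builtin expects, call it, and reinterpret the result back.
    transmute(__lasx_xvsll_b(transmute(a), transmute(b)))
}

The same shape applies to the 128-bit LSX wrappers below, with `m128i`-style unified types and the `__v16i8` .. `__v2f64` internal vectors.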
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
index ba821a3e3dc..764e69ca054 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
@@ -6,6874 +6,6875 @@
 // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsx.spec
 // ```
 
+use crate::mem::transmute;
 use super::types::*;
 
 #[allow(improper_ctypes)]
 unsafe extern "unadjusted" {
     #[link_name = "llvm.loongarch.lsx.vsll.b"]
-    fn __lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsll_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsll.h"]
-    fn __lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsll_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsll.w"]
-    fn __lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsll_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsll.d"]
-    fn __lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsll_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslli.b"]
-    fn __lsx_vslli_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vslli_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslli.h"]
-    fn __lsx_vslli_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vslli_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslli.w"]
-    fn __lsx_vslli_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vslli_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslli.d"]
-    fn __lsx_vslli_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vslli_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsra.b"]
-    fn __lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsra_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsra.h"]
-    fn __lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsra_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsra.w"]
-    fn __lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsra_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsra.d"]
-    fn __lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsra_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrai.b"]
-    fn __lsx_vsrai_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrai_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrai.h"]
-    fn __lsx_vsrai_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrai_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrai.w"]
-    fn __lsx_vsrai_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrai_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrai.d"]
-    fn __lsx_vsrai_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrai_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrar.b"]
-    fn __lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsrar_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrar.h"]
-    fn __lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsrar_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrar.w"]
-    fn __lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsrar_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrar.d"]
-    fn __lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsrar_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrari.b"]
-    fn __lsx_vsrari_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrari_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrari.h"]
-    fn __lsx_vsrari_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrari_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrari.w"]
-    fn __lsx_vsrari_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrari_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrari.d"]
-    fn __lsx_vsrari_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrari_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrl.b"]
-    fn __lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsrl_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrl.h"]
-    fn __lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsrl_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrl.w"]
-    fn __lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsrl_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrl.d"]
-    fn __lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsrl_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrli.b"]
-    fn __lsx_vsrli_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrli_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrli.h"]
-    fn __lsx_vsrli_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrli_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrli.w"]
-    fn __lsx_vsrli_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrli_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrli.d"]
-    fn __lsx_vsrli_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrli_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlr.b"]
-    fn __lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsrlr_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlr.h"]
-    fn __lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsrlr_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlr.w"]
-    fn __lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsrlr_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlr.d"]
-    fn __lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsrlr_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlri.b"]
-    fn __lsx_vsrlri_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrlri_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlri.h"]
-    fn __lsx_vsrlri_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrlri_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlri.w"]
-    fn __lsx_vsrlri_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrlri_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlri.d"]
-    fn __lsx_vsrlri_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrlri_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vbitclr.b"]
-    fn __lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vbitclr_b(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitclr.h"]
-    fn __lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vbitclr_h(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitclr.w"]
-    fn __lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vbitclr_w(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitclr.d"]
-    fn __lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vbitclr_d(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitclri.b"]
-    fn __lsx_vbitclri_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vbitclri_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitclri.h"]
-    fn __lsx_vbitclri_h(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vbitclri_h(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitclri.w"]
-    fn __lsx_vbitclri_w(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vbitclri_w(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitclri.d"]
-    fn __lsx_vbitclri_d(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vbitclri_d(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitset.b"]
-    fn __lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vbitset_b(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitset.h"]
-    fn __lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vbitset_h(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitset.w"]
-    fn __lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vbitset_w(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitset.d"]
-    fn __lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vbitset_d(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitseti.b"]
-    fn __lsx_vbitseti_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vbitseti_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitseti.h"]
-    fn __lsx_vbitseti_h(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vbitseti_h(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitseti.w"]
-    fn __lsx_vbitseti_w(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vbitseti_w(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitseti.d"]
-    fn __lsx_vbitseti_d(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vbitseti_d(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitrev.b"]
-    fn __lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vbitrev_b(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitrev.h"]
-    fn __lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vbitrev_h(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitrev.w"]
-    fn __lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vbitrev_w(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitrev.d"]
-    fn __lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vbitrev_d(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.b"]
-    fn __lsx_vbitrevi_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vbitrevi_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.h"]
-    fn __lsx_vbitrevi_h(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vbitrevi_h(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.w"]
-    fn __lsx_vbitrevi_w(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vbitrevi_w(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.d"]
-    fn __lsx_vbitrevi_d(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vbitrevi_d(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vadd.b"]
-    fn __lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vadd_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vadd.h"]
-    fn __lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vadd_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vadd.w"]
-    fn __lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vadd_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vadd.d"]
-    fn __lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vadd_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddi.bu"]
-    fn __lsx_vaddi_bu(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vaddi_bu(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vaddi.hu"]
-    fn __lsx_vaddi_hu(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vaddi_hu(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddi.wu"]
-    fn __lsx_vaddi_wu(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vaddi_wu(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddi.du"]
-    fn __lsx_vaddi_du(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vaddi_du(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsub.b"]
-    fn __lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsub_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsub.h"]
-    fn __lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsub_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsub.w"]
-    fn __lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsub_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsub.d"]
-    fn __lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsub_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubi.bu"]
-    fn __lsx_vsubi_bu(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsubi_bu(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsubi.hu"]
-    fn __lsx_vsubi_hu(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsubi_hu(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubi.wu"]
-    fn __lsx_vsubi_wu(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsubi_wu(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubi.du"]
-    fn __lsx_vsubi_du(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsubi_du(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmax.b"]
-    fn __lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmax_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmax.h"]
-    fn __lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmax_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmax.w"]
-    fn __lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmax_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmax.d"]
-    fn __lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmax_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaxi.b"]
-    fn __lsx_vmaxi_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vmaxi_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmaxi.h"]
-    fn __lsx_vmaxi_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vmaxi_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaxi.w"]
-    fn __lsx_vmaxi_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vmaxi_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaxi.d"]
-    fn __lsx_vmaxi_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vmaxi_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmax.bu"]
-    fn __lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmax_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmax.hu"]
-    fn __lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmax_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmax.wu"]
-    fn __lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmax_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmax.du"]
-    fn __lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmax_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaxi.bu"]
-    fn __lsx_vmaxi_bu(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vmaxi_bu(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmaxi.hu"]
-    fn __lsx_vmaxi_hu(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vmaxi_hu(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmaxi.wu"]
-    fn __lsx_vmaxi_wu(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vmaxi_wu(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmaxi.du"]
-    fn __lsx_vmaxi_du(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vmaxi_du(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmin.b"]
-    fn __lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmin_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmin.h"]
-    fn __lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmin_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmin.w"]
-    fn __lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmin_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmin.d"]
-    fn __lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmin_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmini.b"]
-    fn __lsx_vmini_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vmini_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmini.h"]
-    fn __lsx_vmini_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vmini_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmini.w"]
-    fn __lsx_vmini_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vmini_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmini.d"]
-    fn __lsx_vmini_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vmini_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmin.bu"]
-    fn __lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmin_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmin.hu"]
-    fn __lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmin_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmin.wu"]
-    fn __lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmin_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmin.du"]
-    fn __lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmin_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmini.bu"]
-    fn __lsx_vmini_bu(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vmini_bu(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmini.hu"]
-    fn __lsx_vmini_hu(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vmini_hu(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmini.wu"]
-    fn __lsx_vmini_wu(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vmini_wu(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmini.du"]
-    fn __lsx_vmini_du(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vmini_du(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vseq.b"]
-    fn __lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vseq_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vseq.h"]
-    fn __lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vseq_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vseq.w"]
-    fn __lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vseq_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vseq.d"]
-    fn __lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vseq_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vseqi.b"]
-    fn __lsx_vseqi_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vseqi_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vseqi.h"]
-    fn __lsx_vseqi_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vseqi_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vseqi.w"]
-    fn __lsx_vseqi_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vseqi_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vseqi.d"]
-    fn __lsx_vseqi_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vseqi_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslti.b"]
-    fn __lsx_vslti_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vslti_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslt.b"]
-    fn __lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vslt_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslt.h"]
-    fn __lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vslt_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslt.w"]
-    fn __lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vslt_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslt.d"]
-    fn __lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vslt_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslti.h"]
-    fn __lsx_vslti_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vslti_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslti.w"]
-    fn __lsx_vslti_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vslti_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslti.d"]
-    fn __lsx_vslti_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vslti_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslt.bu"]
-    fn __lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8;
+    fn __lsx_vslt_bu(a: __v16u8, b: __v16u8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslt.hu"]
-    fn __lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16;
+    fn __lsx_vslt_hu(a: __v8u16, b: __v8u16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslt.wu"]
-    fn __lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32;
+    fn __lsx_vslt_wu(a: __v4u32, b: __v4u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslt.du"]
-    fn __lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vslt_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslti.bu"]
-    fn __lsx_vslti_bu(a: v16u8, b: u32) -> v16i8;
+    fn __lsx_vslti_bu(a: __v16u8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslti.hu"]
-    fn __lsx_vslti_hu(a: v8u16, b: u32) -> v8i16;
+    fn __lsx_vslti_hu(a: __v8u16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslti.wu"]
-    fn __lsx_vslti_wu(a: v4u32, b: u32) -> v4i32;
+    fn __lsx_vslti_wu(a: __v4u32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslti.du"]
-    fn __lsx_vslti_du(a: v2u64, b: u32) -> v2i64;
+    fn __lsx_vslti_du(a: __v2u64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsle.b"]
-    fn __lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsle_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsle.h"]
-    fn __lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsle_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsle.w"]
-    fn __lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsle_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsle.d"]
-    fn __lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsle_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslei.b"]
-    fn __lsx_vslei_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vslei_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslei.h"]
-    fn __lsx_vslei_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vslei_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslei.w"]
-    fn __lsx_vslei_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vslei_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslei.d"]
-    fn __lsx_vslei_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vslei_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsle.bu"]
-    fn __lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8;
+    fn __lsx_vsle_bu(a: __v16u8, b: __v16u8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsle.hu"]
-    fn __lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16;
+    fn __lsx_vsle_hu(a: __v8u16, b: __v8u16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsle.wu"]
-    fn __lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32;
+    fn __lsx_vsle_wu(a: __v4u32, b: __v4u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsle.du"]
-    fn __lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vsle_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslei.bu"]
-    fn __lsx_vslei_bu(a: v16u8, b: u32) -> v16i8;
+    fn __lsx_vslei_bu(a: __v16u8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslei.hu"]
-    fn __lsx_vslei_hu(a: v8u16, b: u32) -> v8i16;
+    fn __lsx_vslei_hu(a: __v8u16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslei.wu"]
-    fn __lsx_vslei_wu(a: v4u32, b: u32) -> v4i32;
+    fn __lsx_vslei_wu(a: __v4u32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslei.du"]
-    fn __lsx_vslei_du(a: v2u64, b: u32) -> v2i64;
+    fn __lsx_vslei_du(a: __v2u64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsat.b"]
-    fn __lsx_vsat_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsat_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsat.h"]
-    fn __lsx_vsat_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsat_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsat.w"]
-    fn __lsx_vsat_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsat_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsat.d"]
-    fn __lsx_vsat_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsat_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsat.bu"]
-    fn __lsx_vsat_bu(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vsat_bu(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vsat.hu"]
-    fn __lsx_vsat_hu(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vsat_hu(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vsat.wu"]
-    fn __lsx_vsat_wu(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vsat_wu(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsat.du"]
-    fn __lsx_vsat_du(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vsat_du(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vadda.b"]
-    fn __lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vadda_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vadda.h"]
-    fn __lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vadda_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vadda.w"]
-    fn __lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vadda_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vadda.d"]
-    fn __lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vadda_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsadd.b"]
-    fn __lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsadd_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsadd.h"]
-    fn __lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsadd_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsadd.w"]
-    fn __lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsadd_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsadd.d"]
-    fn __lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsadd_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsadd.bu"]
-    fn __lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vsadd_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vsadd.hu"]
-    fn __lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vsadd_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vsadd.wu"]
-    fn __lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vsadd_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsadd.du"]
-    fn __lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vsadd_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vavg.b"]
-    fn __lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vavg_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vavg.h"]
-    fn __lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vavg_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vavg.w"]
-    fn __lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vavg_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vavg.d"]
-    fn __lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vavg_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vavg.bu"]
-    fn __lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vavg_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vavg.hu"]
-    fn __lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vavg_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vavg.wu"]
-    fn __lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vavg_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vavg.du"]
-    fn __lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vavg_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vavgr.b"]
-    fn __lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vavgr_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vavgr.h"]
-    fn __lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vavgr_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vavgr.w"]
-    fn __lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vavgr_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vavgr.d"]
-    fn __lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vavgr_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vavgr.bu"]
-    fn __lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vavgr_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vavgr.hu"]
-    fn __lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vavgr_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vavgr.wu"]
-    fn __lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vavgr_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vavgr.du"]
-    fn __lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vavgr_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vssub.b"]
-    fn __lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vssub_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssub.h"]
-    fn __lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vssub_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssub.w"]
-    fn __lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vssub_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssub.d"]
-    fn __lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vssub_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssub.bu"]
-    fn __lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vssub_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssub.hu"]
-    fn __lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vssub_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssub.wu"]
-    fn __lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vssub_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssub.du"]
-    fn __lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vssub_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vabsd.b"]
-    fn __lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vabsd_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vabsd.h"]
-    fn __lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vabsd_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vabsd.w"]
-    fn __lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vabsd_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vabsd.d"]
-    fn __lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vabsd_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vabsd.bu"]
-    fn __lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vabsd_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vabsd.hu"]
-    fn __lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vabsd_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vabsd.wu"]
-    fn __lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vabsd_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vabsd.du"]
-    fn __lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vabsd_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmul.b"]
-    fn __lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmul_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmul.h"]
-    fn __lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmul_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmul.w"]
-    fn __lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmul_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmul.d"]
-    fn __lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmul_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmadd.b"]
-    fn __lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vmadd_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmadd.h"]
-    fn __lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vmadd_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmadd.w"]
-    fn __lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    fn __lsx_vmadd_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmadd.d"]
-    fn __lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmadd_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmsub.b"]
-    fn __lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vmsub_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmsub.h"]
-    fn __lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vmsub_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmsub.w"]
-    fn __lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    fn __lsx_vmsub_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmsub.d"]
-    fn __lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmsub_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vdiv.b"]
-    fn __lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vdiv_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vdiv.h"]
-    fn __lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vdiv_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vdiv.w"]
-    fn __lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vdiv_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vdiv.d"]
-    fn __lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vdiv_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vdiv.bu"]
-    fn __lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vdiv_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vdiv.hu"]
-    fn __lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vdiv_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vdiv.wu"]
-    fn __lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vdiv_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vdiv.du"]
-    fn __lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vdiv_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.h.b"]
-    fn __lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vhaddw_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vhaddw.w.h"]
-    fn __lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vhaddw_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vhaddw.d.w"]
-    fn __lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vhaddw_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.hu.bu"]
-    fn __lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16;
+    fn __lsx_vhaddw_hu_bu(a: __v16u8, b: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vhaddw.wu.hu"]
-    fn __lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32;
+    fn __lsx_vhaddw_wu_hu(a: __v8u16, b: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vhaddw.du.wu"]
-    fn __lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64;
+    fn __lsx_vhaddw_du_wu(a: __v4u32, b: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.h.b"]
-    fn __lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vhsubw_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vhsubw.w.h"]
-    fn __lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vhsubw_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vhsubw.d.w"]
-    fn __lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vhsubw_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.hu.bu"]
-    fn __lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vhsubw_hu_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vhsubw.wu.hu"]
-    fn __lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vhsubw_wu_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vhsubw.du.wu"]
-    fn __lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vhsubw_du_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmod.b"]
-    fn __lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmod_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmod.h"]
-    fn __lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmod_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmod.w"]
-    fn __lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmod_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmod.d"]
-    fn __lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmod_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmod.bu"]
-    fn __lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmod_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmod.hu"]
-    fn __lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmod_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmod.wu"]
-    fn __lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmod_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmod.du"]
-    fn __lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmod_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vreplve.b"]
-    fn __lsx_vreplve_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vreplve_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vreplve.h"]
-    fn __lsx_vreplve_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vreplve_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vreplve.w"]
-    fn __lsx_vreplve_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vreplve_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplve.d"]
-    fn __lsx_vreplve_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vreplve_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vreplvei.b"]
-    fn __lsx_vreplvei_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vreplvei_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vreplvei.h"]
-    fn __lsx_vreplvei_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vreplvei_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vreplvei.w"]
-    fn __lsx_vreplvei_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vreplvei_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplvei.d"]
-    fn __lsx_vreplvei_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vreplvei_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpickev.b"]
-    fn __lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpickev_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpickev.h"]
-    fn __lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpickev_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpickev.w"]
-    fn __lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpickev_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpickev.d"]
-    fn __lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpickev_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpickod.b"]
-    fn __lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpickod_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpickod.h"]
-    fn __lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpickod_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpickod.w"]
-    fn __lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpickod_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpickod.d"]
-    fn __lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpickod_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vilvh.b"]
-    fn __lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vilvh_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vilvh.h"]
-    fn __lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vilvh_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vilvh.w"]
-    fn __lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vilvh_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vilvh.d"]
-    fn __lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vilvh_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vilvl.b"]
-    fn __lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vilvl_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vilvl.h"]
-    fn __lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vilvl_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vilvl.w"]
-    fn __lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vilvl_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vilvl.d"]
-    fn __lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vilvl_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpackev.b"]
-    fn __lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpackev_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpackev.h"]
-    fn __lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpackev_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpackev.w"]
-    fn __lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpackev_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpackev.d"]
-    fn __lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpackev_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpackod.b"]
-    fn __lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpackod_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpackod.h"]
-    fn __lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpackod_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpackod.w"]
-    fn __lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpackod_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpackod.d"]
-    fn __lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpackod_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vshuf.h"]
-    fn __lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vshuf_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vshuf.w"]
-    fn __lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    fn __lsx_vshuf_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vshuf.d"]
-    fn __lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vshuf_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vand.v"]
-    fn __lsx_vand_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vand_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vandi.b"]
-    fn __lsx_vandi_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vandi_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vor.v"]
-    fn __lsx_vor_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vor_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vori.b"]
-    fn __lsx_vori_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vori_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vnor.v"]
-    fn __lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vnor_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vnori.b"]
-    fn __lsx_vnori_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vnori_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vxor.v"]
-    fn __lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vxor_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vxori.b"]
-    fn __lsx_vxori_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vxori_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitsel.v"]
-    fn __lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8;
+    fn __lsx_vbitsel_v(a: __v16u8, b: __v16u8, c: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitseli.b"]
-    fn __lsx_vbitseli_b(a: v16u8, b: v16u8, c: u32) -> v16u8;
+    fn __lsx_vbitseli_b(a: __v16u8, b: __v16u8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.b"]
-    fn __lsx_vshuf4i_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vshuf4i_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.h"]
-    fn __lsx_vshuf4i_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vshuf4i_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.w"]
-    fn __lsx_vshuf4i_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vshuf4i_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.b"]
-    fn __lsx_vreplgr2vr_b(a: i32) -> v16i8;
+    fn __lsx_vreplgr2vr_b(a: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.h"]
-    fn __lsx_vreplgr2vr_h(a: i32) -> v8i16;
+    fn __lsx_vreplgr2vr_h(a: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.w"]
-    fn __lsx_vreplgr2vr_w(a: i32) -> v4i32;
+    fn __lsx_vreplgr2vr_w(a: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.d"]
-    fn __lsx_vreplgr2vr_d(a: i64) -> v2i64;
+    fn __lsx_vreplgr2vr_d(a: i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpcnt.b"]
-    fn __lsx_vpcnt_b(a: v16i8) -> v16i8;
+    fn __lsx_vpcnt_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpcnt.h"]
-    fn __lsx_vpcnt_h(a: v8i16) -> v8i16;
+    fn __lsx_vpcnt_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpcnt.w"]
-    fn __lsx_vpcnt_w(a: v4i32) -> v4i32;
+    fn __lsx_vpcnt_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpcnt.d"]
-    fn __lsx_vpcnt_d(a: v2i64) -> v2i64;
+    fn __lsx_vpcnt_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vclo.b"]
-    fn __lsx_vclo_b(a: v16i8) -> v16i8;
+    fn __lsx_vclo_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vclo.h"]
-    fn __lsx_vclo_h(a: v8i16) -> v8i16;
+    fn __lsx_vclo_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vclo.w"]
-    fn __lsx_vclo_w(a: v4i32) -> v4i32;
+    fn __lsx_vclo_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vclo.d"]
-    fn __lsx_vclo_d(a: v2i64) -> v2i64;
+    fn __lsx_vclo_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vclz.b"]
-    fn __lsx_vclz_b(a: v16i8) -> v16i8;
+    fn __lsx_vclz_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vclz.h"]
-    fn __lsx_vclz_h(a: v8i16) -> v8i16;
+    fn __lsx_vclz_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vclz.w"]
-    fn __lsx_vclz_w(a: v4i32) -> v4i32;
+    fn __lsx_vclz_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vclz.d"]
-    fn __lsx_vclz_d(a: v2i64) -> v2i64;
+    fn __lsx_vclz_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.b"]
-    fn __lsx_vpickve2gr_b(a: v16i8, b: u32) -> i32;
+    fn __lsx_vpickve2gr_b(a: __v16i8, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.h"]
-    fn __lsx_vpickve2gr_h(a: v8i16, b: u32) -> i32;
+    fn __lsx_vpickve2gr_h(a: __v8i16, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.w"]
-    fn __lsx_vpickve2gr_w(a: v4i32, b: u32) -> i32;
+    fn __lsx_vpickve2gr_w(a: __v4i32, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.d"]
-    fn __lsx_vpickve2gr_d(a: v2i64, b: u32) -> i64;
+    fn __lsx_vpickve2gr_d(a: __v2i64, b: u32) -> i64;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.bu"]
-    fn __lsx_vpickve2gr_bu(a: v16i8, b: u32) -> u32;
+    fn __lsx_vpickve2gr_bu(a: __v16i8, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.hu"]
-    fn __lsx_vpickve2gr_hu(a: v8i16, b: u32) -> u32;
+    fn __lsx_vpickve2gr_hu(a: __v8i16, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.wu"]
-    fn __lsx_vpickve2gr_wu(a: v4i32, b: u32) -> u32;
+    fn __lsx_vpickve2gr_wu(a: __v4i32, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.du"]
-    fn __lsx_vpickve2gr_du(a: v2i64, b: u32) -> u64;
+    fn __lsx_vpickve2gr_du(a: __v2i64, b: u32) -> u64;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.b"]
-    fn __lsx_vinsgr2vr_b(a: v16i8, b: i32, c: u32) -> v16i8;
+    fn __lsx_vinsgr2vr_b(a: __v16i8, b: i32, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.h"]
-    fn __lsx_vinsgr2vr_h(a: v8i16, b: i32, c: u32) -> v8i16;
+    fn __lsx_vinsgr2vr_h(a: __v8i16, b: i32, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.w"]
-    fn __lsx_vinsgr2vr_w(a: v4i32, b: i32, c: u32) -> v4i32;
+    fn __lsx_vinsgr2vr_w(a: __v4i32, b: i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.d"]
-    fn __lsx_vinsgr2vr_d(a: v2i64, b: i64, c: u32) -> v2i64;
+    fn __lsx_vinsgr2vr_d(a: __v2i64, b: i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfadd.s"]
-    fn __lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfadd_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfadd.d"]
-    fn __lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfadd_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfsub.s"]
-    fn __lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfsub_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfsub.d"]
-    fn __lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfsub_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmul.s"]
-    fn __lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmul_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmul.d"]
-    fn __lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmul_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfdiv.s"]
-    fn __lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfdiv_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfdiv.d"]
-    fn __lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfdiv_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfcvt.h.s"]
-    fn __lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16;
+    fn __lsx_vfcvt_h_s(a: __v4f32, b: __v4f32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vfcvt.s.d"]
-    fn __lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32;
+    fn __lsx_vfcvt_s_d(a: __v2f64, b: __v2f64) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmin.s"]
-    fn __lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmin_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmin.d"]
-    fn __lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmin_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmina.s"]
-    fn __lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmina_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmina.d"]
-    fn __lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmina_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmax.s"]
-    fn __lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmax_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmax.d"]
-    fn __lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmax_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmaxa.s"]
-    fn __lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmaxa_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmaxa.d"]
-    fn __lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmaxa_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfclass.s"]
-    fn __lsx_vfclass_s(a: v4f32) -> v4i32;
+    fn __lsx_vfclass_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfclass.d"]
-    fn __lsx_vfclass_d(a: v2f64) -> v2i64;
+    fn __lsx_vfclass_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfsqrt.s"]
-    fn __lsx_vfsqrt_s(a: v4f32) -> v4f32;
+    fn __lsx_vfsqrt_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfsqrt.d"]
-    fn __lsx_vfsqrt_d(a: v2f64) -> v2f64;
+    fn __lsx_vfsqrt_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrecip.s"]
-    fn __lsx_vfrecip_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrecip_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrecip.d"]
-    fn __lsx_vfrecip_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrecip_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrecipe.s"]
-    fn __lsx_vfrecipe_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrecipe_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrecipe.d"]
-    fn __lsx_vfrecipe_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrecipe_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrsqrte.s"]
-    fn __lsx_vfrsqrte_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrsqrte_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrsqrte.d"]
-    fn __lsx_vfrsqrte_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrsqrte_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrint.s"]
-    fn __lsx_vfrint_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrint_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrint.d"]
-    fn __lsx_vfrint_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrint_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrsqrt.s"]
-    fn __lsx_vfrsqrt_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrsqrt_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrsqrt.d"]
-    fn __lsx_vfrsqrt_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrsqrt_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vflogb.s"]
-    fn __lsx_vflogb_s(a: v4f32) -> v4f32;
+    fn __lsx_vflogb_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vflogb.d"]
-    fn __lsx_vflogb_d(a: v2f64) -> v2f64;
+    fn __lsx_vflogb_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfcvth.s.h"]
-    fn __lsx_vfcvth_s_h(a: v8i16) -> v4f32;
+    fn __lsx_vfcvth_s_h(a: __v8i16) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfcvth.d.s"]
-    fn __lsx_vfcvth_d_s(a: v4f32) -> v2f64;
+    fn __lsx_vfcvth_d_s(a: __v4f32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfcvtl.s.h"]
-    fn __lsx_vfcvtl_s_h(a: v8i16) -> v4f32;
+    fn __lsx_vfcvtl_s_h(a: __v8i16) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfcvtl.d.s"]
-    fn __lsx_vfcvtl_d_s(a: v4f32) -> v2f64;
+    fn __lsx_vfcvtl_d_s(a: __v4f32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vftint.w.s"]
-    fn __lsx_vftint_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftint_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftint.l.d"]
-    fn __lsx_vftint_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftint_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftint.wu.s"]
-    fn __lsx_vftint_wu_s(a: v4f32) -> v4u32;
+    fn __lsx_vftint_wu_s(a: __v4f32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vftint.lu.d"]
-    fn __lsx_vftint_lu_d(a: v2f64) -> v2u64;
+    fn __lsx_vftint_lu_d(a: __v2f64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vftintrz.w.s"]
-    fn __lsx_vftintrz_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrz_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrz.l.d"]
-    fn __lsx_vftintrz_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrz_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrz.wu.s"]
-    fn __lsx_vftintrz_wu_s(a: v4f32) -> v4u32;
+    fn __lsx_vftintrz_wu_s(a: __v4f32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vftintrz.lu.d"]
-    fn __lsx_vftintrz_lu_d(a: v2f64) -> v2u64;
+    fn __lsx_vftintrz_lu_d(a: __v2f64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vffint.s.w"]
-    fn __lsx_vffint_s_w(a: v4i32) -> v4f32;
+    fn __lsx_vffint_s_w(a: __v4i32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vffint.d.l"]
-    fn __lsx_vffint_d_l(a: v2i64) -> v2f64;
+    fn __lsx_vffint_d_l(a: __v2i64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vffint.s.wu"]
-    fn __lsx_vffint_s_wu(a: v4u32) -> v4f32;
+    fn __lsx_vffint_s_wu(a: __v4u32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vffint.d.lu"]
-    fn __lsx_vffint_d_lu(a: v2u64) -> v2f64;
+    fn __lsx_vffint_d_lu(a: __v2u64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vandn.v"]
-    fn __lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vandn_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vneg.b"]
-    fn __lsx_vneg_b(a: v16i8) -> v16i8;
+    fn __lsx_vneg_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vneg.h"]
-    fn __lsx_vneg_h(a: v8i16) -> v8i16;
+    fn __lsx_vneg_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vneg.w"]
-    fn __lsx_vneg_w(a: v4i32) -> v4i32;
+    fn __lsx_vneg_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vneg.d"]
-    fn __lsx_vneg_d(a: v2i64) -> v2i64;
+    fn __lsx_vneg_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmuh.b"]
-    fn __lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmuh_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmuh.h"]
-    fn __lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmuh_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmuh.w"]
-    fn __lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmuh_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmuh.d"]
-    fn __lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmuh_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmuh.bu"]
-    fn __lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmuh_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmuh.hu"]
-    fn __lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmuh_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmuh.wu"]
-    fn __lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmuh_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmuh.du"]
-    fn __lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmuh_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vsllwil.h.b"]
-    fn __lsx_vsllwil_h_b(a: v16i8, b: u32) -> v8i16;
+    fn __lsx_vsllwil_h_b(a: __v16i8, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsllwil.w.h"]
-    fn __lsx_vsllwil_w_h(a: v8i16, b: u32) -> v4i32;
+    fn __lsx_vsllwil_w_h(a: __v8i16, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsllwil.d.w"]
-    fn __lsx_vsllwil_d_w(a: v4i32, b: u32) -> v2i64;
+    fn __lsx_vsllwil_d_w(a: __v4i32, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsllwil.hu.bu"]
-    fn __lsx_vsllwil_hu_bu(a: v16u8, b: u32) -> v8u16;
+    fn __lsx_vsllwil_hu_bu(a: __v16u8, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vsllwil.wu.hu"]
-    fn __lsx_vsllwil_wu_hu(a: v8u16, b: u32) -> v4u32;
+    fn __lsx_vsllwil_wu_hu(a: __v8u16, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsllwil.du.wu"]
-    fn __lsx_vsllwil_du_wu(a: v4u32, b: u32) -> v2u64;
+    fn __lsx_vsllwil_du_wu(a: __v4u32, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vsran.b.h"]
-    fn __lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsran_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsran.h.w"]
-    fn __lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsran_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsran.w.d"]
-    fn __lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsran_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssran.b.h"]
-    fn __lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssran_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssran.h.w"]
-    fn __lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssran_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssran.w.d"]
-    fn __lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssran_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssran.bu.h"]
-    fn __lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssran_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssran.hu.w"]
-    fn __lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssran_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssran.wu.d"]
-    fn __lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssran_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsrarn.b.h"]
-    fn __lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsrarn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrarn.h.w"]
-    fn __lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsrarn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrarn.w.d"]
-    fn __lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsrarn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrarn.b.h"]
-    fn __lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssrarn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrarn.h.w"]
-    fn __lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssrarn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrarn.w.d"]
-    fn __lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssrarn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrarn.bu.h"]
-    fn __lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssrarn_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrarn.hu.w"]
-    fn __lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssrarn_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrarn.wu.d"]
-    fn __lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssrarn_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsrln.b.h"]
-    fn __lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsrln_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrln.h.w"]
-    fn __lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsrln_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrln.w.d"]
-    fn __lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsrln_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrln.bu.h"]
-    fn __lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssrln_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrln.hu.w"]
-    fn __lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssrln_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrln.wu.d"]
-    fn __lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssrln_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsrlrn.b.h"]
-    fn __lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsrlrn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlrn.h.w"]
-    fn __lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsrlrn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlrn.w.d"]
-    fn __lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsrlrn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.bu.h"]
-    fn __lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssrlrn_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.hu.w"]
-    fn __lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssrlrn_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.wu.d"]
-    fn __lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssrlrn_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vfrstpi.b"]
-    fn __lsx_vfrstpi_b(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vfrstpi_b(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vfrstpi.h"]
-    fn __lsx_vfrstpi_h(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vfrstpi_h(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vfrstp.b"]
-    fn __lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vfrstp_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vfrstp.h"]
-    fn __lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vfrstp_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.d"]
-    fn __lsx_vshuf4i_d(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vshuf4i_d(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vbsrl.v"]
-    fn __lsx_vbsrl_v(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vbsrl_v(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vbsll.v"]
-    fn __lsx_vbsll_v(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vbsll_v(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vextrins.b"]
-    fn __lsx_vextrins_b(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vextrins_b(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vextrins.h"]
-    fn __lsx_vextrins_h(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vextrins_h(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vextrins.w"]
-    fn __lsx_vextrins_w(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vextrins_w(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vextrins.d"]
-    fn __lsx_vextrins_d(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vextrins_d(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmskltz.b"]
-    fn __lsx_vmskltz_b(a: v16i8) -> v16i8;
+    fn __lsx_vmskltz_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmskltz.h"]
-    fn __lsx_vmskltz_h(a: v8i16) -> v8i16;
+    fn __lsx_vmskltz_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmskltz.w"]
-    fn __lsx_vmskltz_w(a: v4i32) -> v4i32;
+    fn __lsx_vmskltz_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmskltz.d"]
-    fn __lsx_vmskltz_d(a: v2i64) -> v2i64;
+    fn __lsx_vmskltz_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsigncov.b"]
-    fn __lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsigncov_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsigncov.h"]
-    fn __lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsigncov_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsigncov.w"]
-    fn __lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsigncov_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsigncov.d"]
-    fn __lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsigncov_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfmadd.s"]
-    fn __lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfmadd_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmadd.d"]
-    fn __lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfmadd_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmsub.s"]
-    fn __lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfmsub_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmsub.d"]
-    fn __lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfmsub_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfnmadd.s"]
-    fn __lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfnmadd_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfnmadd.d"]
-    fn __lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfnmadd_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfnmsub.s"]
-    fn __lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfnmsub_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfnmsub.d"]
-    fn __lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfnmsub_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vftintrne.w.s"]
-    fn __lsx_vftintrne_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrne_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrne.l.d"]
-    fn __lsx_vftintrne_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrne_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrp.w.s"]
-    fn __lsx_vftintrp_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrp_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrp.l.d"]
-    fn __lsx_vftintrp_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrp_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrm.w.s"]
-    fn __lsx_vftintrm_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrm_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrm.l.d"]
-    fn __lsx_vftintrm_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrm_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftint.w.d"]
-    fn __lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftint_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vffint.s.l"]
-    fn __lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32;
+    fn __lsx_vffint_s_l(a: __v2i64, b: __v2i64) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vftintrz.w.d"]
-    fn __lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrz_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrp.w.d"]
-    fn __lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrp_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrm.w.d"]
-    fn __lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrm_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrne.w.d"]
-    fn __lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrne_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintl.l.s"]
-    fn __lsx_vftintl_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintl_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftinth.l.s"]
-    fn __lsx_vftinth_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftinth_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vffinth.d.w"]
-    fn __lsx_vffinth_d_w(a: v4i32) -> v2f64;
+    fn __lsx_vffinth_d_w(a: __v4i32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vffintl.d.w"]
-    fn __lsx_vffintl_d_w(a: v4i32) -> v2f64;
+    fn __lsx_vffintl_d_w(a: __v4i32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vftintrzl.l.s"]
-    fn __lsx_vftintrzl_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrzl_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrzh.l.s"]
-    fn __lsx_vftintrzh_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrzh_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrpl.l.s"]
-    fn __lsx_vftintrpl_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrpl_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrph.l.s"]
-    fn __lsx_vftintrph_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrph_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrml.l.s"]
-    fn __lsx_vftintrml_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrml_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrmh.l.s"]
-    fn __lsx_vftintrmh_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrmh_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrnel.l.s"]
-    fn __lsx_vftintrnel_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrnel_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrneh.l.s"]
-    fn __lsx_vftintrneh_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrneh_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfrintrne.s"]
-    fn __lsx_vfrintrne_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrne_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrne.d"]
-    fn __lsx_vfrintrne_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrne_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrintrz.s"]
-    fn __lsx_vfrintrz_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrz_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrz.d"]
-    fn __lsx_vfrintrz_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrz_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrintrp.s"]
-    fn __lsx_vfrintrp_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrp_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrp.d"]
-    fn __lsx_vfrintrp_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrp_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrintrm.s"]
-    fn __lsx_vfrintrm_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrm_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrm.d"]
-    fn __lsx_vfrintrm_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrm_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vstelm.b"]
-    fn __lsx_vstelm_b(a: v16i8, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_b(a: __v16i8, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vstelm.h"]
-    fn __lsx_vstelm_h(a: v8i16, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_h(a: __v8i16, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vstelm.w"]
-    fn __lsx_vstelm_w(a: v4i32, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_w(a: __v4i32, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vstelm.d"]
-    fn __lsx_vstelm_d(a: v2i64, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_d(a: __v2i64, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vaddwev.d.w"]
-    fn __lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwev_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.w.h"]
-    fn __lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwev_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwev.h.b"]
-    fn __lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwev_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwod.d.w"]
-    fn __lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwod_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.w.h"]
-    fn __lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwod_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwod.h.b"]
-    fn __lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwod_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwev.d.wu"]
-    fn __lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vaddwev_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.w.hu"]
-    fn __lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vaddwev_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwev.h.bu"]
-    fn __lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vaddwev_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwod.d.wu"]
-    fn __lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vaddwod_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.w.hu"]
-    fn __lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vaddwod_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwod.h.bu"]
-    fn __lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vaddwod_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwev.d.wu.w"]
-    fn __lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwev_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.w.hu.h"]
-    fn __lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwev_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwev.h.bu.b"]
-    fn __lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwev_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwod.d.wu.w"]
-    fn __lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwod_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.w.hu.h"]
-    fn __lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwod_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwod.h.bu.b"]
-    fn __lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwod_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwev.d.w"]
-    fn __lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vsubwev_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.w.h"]
-    fn __lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vsubwev_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwev.h.b"]
-    fn __lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vsubwev_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwod.d.w"]
-    fn __lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vsubwod_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.w.h"]
-    fn __lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vsubwod_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwod.h.b"]
-    fn __lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vsubwod_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwev.d.wu"]
-    fn __lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vsubwev_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.w.hu"]
-    fn __lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vsubwev_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwev.h.bu"]
-    fn __lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vsubwev_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwod.d.wu"]
-    fn __lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vsubwod_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.w.hu"]
-    fn __lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vsubwod_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwod.h.bu"]
-    fn __lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vsubwod_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwev.q.d"]
-    fn __lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwev_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.q.d"]
-    fn __lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwod_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.q.du"]
-    fn __lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vaddwev_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.q.du"]
-    fn __lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vaddwod_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.q.d"]
-    fn __lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsubwev_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.q.d"]
-    fn __lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsubwod_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.q.du"]
-    fn __lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vsubwev_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.q.du"]
-    fn __lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vsubwod_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.q.du.d"]
-    fn __lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwev_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.q.du.d"]
-    fn __lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwod_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.d.w"]
-    fn __lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwev_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.w.h"]
-    fn __lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwev_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwev.h.b"]
-    fn __lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwev_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwod.d.w"]
-    fn __lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwod_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.w.h"]
-    fn __lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwod_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwod.h.b"]
-    fn __lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwod_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwev.d.wu"]
-    fn __lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vmulwev_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.w.hu"]
-    fn __lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vmulwev_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwev.h.bu"]
-    fn __lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vmulwev_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwod.d.wu"]
-    fn __lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vmulwod_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.w.hu"]
-    fn __lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vmulwod_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwod.h.bu"]
-    fn __lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vmulwod_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwev.d.wu.w"]
-    fn __lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwev_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.w.hu.h"]
-    fn __lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwev_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwev.h.bu.b"]
-    fn __lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwev_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwod.d.wu.w"]
-    fn __lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwod_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.w.hu.h"]
-    fn __lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwod_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwod.h.bu.b"]
-    fn __lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwod_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwev.q.d"]
-    fn __lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwev_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.q.d"]
-    fn __lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwod_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.q.du"]
-    fn __lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vmulwev_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.q.du"]
-    fn __lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vmulwod_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.q.du.d"]
-    fn __lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwev_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.q.du.d"]
-    fn __lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwod_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.q.d"]
-    fn __lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vhaddw_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.qu.du"]
-    fn __lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vhaddw_qu_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.q.d"]
-    fn __lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vhsubw_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.qu.du"]
-    fn __lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vhsubw_qu_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.d.w"]
-    fn __lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwev_d_w(a: __v2i64, b: __v4i32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.w.h"]
-    fn __lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwev_w_h(a: __v4i32, b: __v8i16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.h.b"]
-    fn __lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwev_h_b(a: __v8i16, b: __v16i8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.d.wu"]
-    fn __lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64;
+    fn __lsx_vmaddwev_d_wu(a: __v2u64, b: __v4u32, c: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.w.hu"]
-    fn __lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32;
+    fn __lsx_vmaddwev_w_hu(a: __v4u32, b: __v8u16, c: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.h.bu"]
-    fn __lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16;
+    fn __lsx_vmaddwev_h_bu(a: __v8u16, b: __v16u8, c: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.d.w"]
-    fn __lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwod_d_w(a: __v2i64, b: __v4i32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.w.h"]
-    fn __lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwod_w_h(a: __v4i32, b: __v8i16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.h.b"]
-    fn __lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwod_h_b(a: __v8i16, b: __v16i8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.d.wu"]
-    fn __lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64;
+    fn __lsx_vmaddwod_d_wu(a: __v2u64, b: __v4u32, c: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.w.hu"]
-    fn __lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32;
+    fn __lsx_vmaddwod_w_hu(a: __v4u32, b: __v8u16, c: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.h.bu"]
-    fn __lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16;
+    fn __lsx_vmaddwod_h_bu(a: __v8u16, b: __v16u8, c: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.d.wu.w"]
-    fn __lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwev_d_wu_w(a: __v2i64, b: __v4u32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.w.hu.h"]
-    fn __lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwev_w_hu_h(a: __v4i32, b: __v8u16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.h.bu.b"]
-    fn __lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwev_h_bu_b(a: __v8i16, b: __v16u8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.d.wu.w"]
-    fn __lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwod_d_wu_w(a: __v2i64, b: __v4u32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.w.hu.h"]
-    fn __lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwod_w_hu_h(a: __v4i32, b: __v8u16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.h.bu.b"]
-    fn __lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwod_h_bu_b(a: __v8i16, b: __v16u8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.q.d"]
-    fn __lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwev_q_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.q.d"]
-    fn __lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwod_q_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.q.du"]
-    fn __lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64;
+    fn __lsx_vmaddwev_q_du(a: __v2u64, b: __v2u64, c: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.q.du"]
-    fn __lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64;
+    fn __lsx_vmaddwod_q_du(a: __v2u64, b: __v2u64, c: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.q.du.d"]
-    fn __lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwev_q_du_d(a: __v2i64, b: __v2u64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.q.du.d"]
-    fn __lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwod_q_du_d(a: __v2i64, b: __v2u64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vrotr.b"]
-    fn __lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vrotr_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vrotr.h"]
-    fn __lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vrotr_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vrotr.w"]
-    fn __lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vrotr_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vrotr.d"]
-    fn __lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vrotr_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vadd.q"]
-    fn __lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vadd_q(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsub.q"]
-    fn __lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsub_q(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vldrepl.b"]
-    fn __lsx_vldrepl_b(a: *const i8, b: i32) -> v16i8;
+    fn __lsx_vldrepl_b(a: *const i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vldrepl.h"]
-    fn __lsx_vldrepl_h(a: *const i8, b: i32) -> v8i16;
+    fn __lsx_vldrepl_h(a: *const i8, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vldrepl.w"]
-    fn __lsx_vldrepl_w(a: *const i8, b: i32) -> v4i32;
+    fn __lsx_vldrepl_w(a: *const i8, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vldrepl.d"]
-    fn __lsx_vldrepl_d(a: *const i8, b: i32) -> v2i64;
+    fn __lsx_vldrepl_d(a: *const i8, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmskgez.b"]
-    fn __lsx_vmskgez_b(a: v16i8) -> v16i8;
+    fn __lsx_vmskgez_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmsknz.b"]
-    fn __lsx_vmsknz_b(a: v16i8) -> v16i8;
+    fn __lsx_vmsknz_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vexth.h.b"]
-    fn __lsx_vexth_h_b(a: v16i8) -> v8i16;
+    fn __lsx_vexth_h_b(a: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vexth.w.h"]
-    fn __lsx_vexth_w_h(a: v8i16) -> v4i32;
+    fn __lsx_vexth_w_h(a: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vexth.d.w"]
-    fn __lsx_vexth_d_w(a: v4i32) -> v2i64;
+    fn __lsx_vexth_d_w(a: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vexth.q.d"]
-    fn __lsx_vexth_q_d(a: v2i64) -> v2i64;
+    fn __lsx_vexth_q_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vexth.hu.bu"]
-    fn __lsx_vexth_hu_bu(a: v16u8) -> v8u16;
+    fn __lsx_vexth_hu_bu(a: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vexth.wu.hu"]
-    fn __lsx_vexth_wu_hu(a: v8u16) -> v4u32;
+    fn __lsx_vexth_wu_hu(a: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vexth.du.wu"]
-    fn __lsx_vexth_du_wu(a: v4u32) -> v2u64;
+    fn __lsx_vexth_du_wu(a: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vexth.qu.du"]
-    fn __lsx_vexth_qu_du(a: v2u64) -> v2u64;
+    fn __lsx_vexth_qu_du(a: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vrotri.b"]
-    fn __lsx_vrotri_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vrotri_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vrotri.h"]
-    fn __lsx_vrotri_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vrotri_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vrotri.w"]
-    fn __lsx_vrotri_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vrotri_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vrotri.d"]
-    fn __lsx_vrotri_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vrotri_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vextl.q.d"]
-    fn __lsx_vextl_q_d(a: v2i64) -> v2i64;
+    fn __lsx_vextl_q_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlni.b.h"]
-    fn __lsx_vsrlni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrlni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlni.h.w"]
-    fn __lsx_vsrlni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrlni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlni.w.d"]
-    fn __lsx_vsrlni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrlni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlni.d.q"]
-    fn __lsx_vsrlni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrlni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.b.h"]
-    fn __lsx_vsrlrni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrlrni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.h.w"]
-    fn __lsx_vsrlrni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrlrni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.w.d"]
-    fn __lsx_vsrlrni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrlrni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.d.q"]
-    fn __lsx_vsrlrni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrlrni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrlni.b.h"]
-    fn __lsx_vssrlni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrlni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrlni.h.w"]
-    fn __lsx_vssrlni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrlni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrlni.w.d"]
-    fn __lsx_vssrlni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrlni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrlni.d.q"]
-    fn __lsx_vssrlni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrlni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrlni.bu.h"]
-    fn __lsx_vssrlni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrlni_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrlni.hu.w"]
-    fn __lsx_vssrlni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrlni_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrlni.wu.d"]
-    fn __lsx_vssrlni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrlni_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrlni.du.q"]
-    fn __lsx_vssrlni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrlni_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.b.h"]
-    fn __lsx_vssrlrni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrlrni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.h.w"]
-    fn __lsx_vssrlrni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrlrni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.w.d"]
-    fn __lsx_vssrlrni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrlrni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.d.q"]
-    fn __lsx_vssrlrni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrlrni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.bu.h"]
-    fn __lsx_vssrlrni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrlrni_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.hu.w"]
-    fn __lsx_vssrlrni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrlrni_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.wu.d"]
-    fn __lsx_vssrlrni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrlrni_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.du.q"]
-    fn __lsx_vssrlrni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrlrni_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vsrani.b.h"]
-    fn __lsx_vsrani_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrani_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrani.h.w"]
-    fn __lsx_vsrani_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrani_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrani.w.d"]
-    fn __lsx_vsrani_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrani_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrani.d.q"]
-    fn __lsx_vsrani_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrani_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrarni.b.h"]
-    fn __lsx_vsrarni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrarni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrarni.h.w"]
-    fn __lsx_vsrarni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrarni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrarni.w.d"]
-    fn __lsx_vsrarni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrarni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrarni.d.q"]
-    fn __lsx_vsrarni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrarni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrani.b.h"]
-    fn __lsx_vssrani_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrani_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrani.h.w"]
-    fn __lsx_vssrani_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrani_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrani.w.d"]
-    fn __lsx_vssrani_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrani_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrani.d.q"]
-    fn __lsx_vssrani_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrani_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrani.bu.h"]
-    fn __lsx_vssrani_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrani_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrani.hu.w"]
-    fn __lsx_vssrani_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrani_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrani.wu.d"]
-    fn __lsx_vssrani_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrani_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrani.du.q"]
-    fn __lsx_vssrani_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrani_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vssrarni.b.h"]
-    fn __lsx_vssrarni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrarni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrarni.h.w"]
-    fn __lsx_vssrarni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrarni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrarni.w.d"]
-    fn __lsx_vssrarni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrarni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrarni.d.q"]
-    fn __lsx_vssrarni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrarni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrarni.bu.h"]
-    fn __lsx_vssrarni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrarni_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrarni.hu.w"]
-    fn __lsx_vssrarni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrarni_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrarni.wu.d"]
-    fn __lsx_vssrarni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrarni_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrarni.du.q"]
-    fn __lsx_vssrarni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrarni_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vpermi.w"]
-    fn __lsx_vpermi_w(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vpermi_w(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vld"]
-    fn __lsx_vld(a: *const i8, b: i32) -> v16i8;
+    fn __lsx_vld(a: *const i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vst"]
-    fn __lsx_vst(a: v16i8, b: *mut i8, c: i32);
+    fn __lsx_vst(a: __v16i8, b: *mut i8, c: i32);
     #[link_name = "llvm.loongarch.lsx.vssrlrn.b.h"]
-    fn __lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssrlrn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.h.w"]
-    fn __lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssrlrn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.w.d"]
-    fn __lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssrlrn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrln.b.h"]
-    fn __lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssrln_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrln.h.w"]
-    fn __lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssrln_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrln.w.d"]
-    fn __lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssrln_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vorn.v"]
-    fn __lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vorn_v(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vldi"]
-    fn __lsx_vldi(a: i32) -> v2i64;
+    fn __lsx_vldi(a: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vshuf.b"]
-    fn __lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vshuf_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vldx"]
-    fn __lsx_vldx(a: *const i8, b: i64) -> v16i8;
+    fn __lsx_vldx(a: *const i8, b: i64) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vstx"]
-    fn __lsx_vstx(a: v16i8, b: *mut i8, c: i64);
+    fn __lsx_vstx(a: __v16i8, b: *mut i8, c: i64);
     #[link_name = "llvm.loongarch.lsx.vextl.qu.du"]
-    fn __lsx_vextl_qu_du(a: v2u64) -> v2u64;
+    fn __lsx_vextl_qu_du(a: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.bnz.b"]
-    fn __lsx_bnz_b(a: v16u8) -> i32;
+    fn __lsx_bnz_b(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.d"]
-    fn __lsx_bnz_d(a: v2u64) -> i32;
+    fn __lsx_bnz_d(a: __v2u64) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.h"]
-    fn __lsx_bnz_h(a: v8u16) -> i32;
+    fn __lsx_bnz_h(a: __v8u16) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.v"]
-    fn __lsx_bnz_v(a: v16u8) -> i32;
+    fn __lsx_bnz_v(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.w"]
-    fn __lsx_bnz_w(a: v4u32) -> i32;
+    fn __lsx_bnz_w(a: __v4u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.b"]
-    fn __lsx_bz_b(a: v16u8) -> i32;
+    fn __lsx_bz_b(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.d"]
-    fn __lsx_bz_d(a: v2u64) -> i32;
+    fn __lsx_bz_d(a: __v2u64) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.h"]
-    fn __lsx_bz_h(a: v8u16) -> i32;
+    fn __lsx_bz_h(a: __v8u16) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.v"]
-    fn __lsx_bz_v(a: v16u8) -> i32;
+    fn __lsx_bz_v(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.w"]
-    fn __lsx_bz_w(a: v4u32) -> i32;
+    fn __lsx_bz_w(a: __v4u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.caf.d"]
-    fn __lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_caf_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.caf.s"]
-    fn __lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_caf_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.ceq.d"]
-    fn __lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_ceq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.ceq.s"]
-    fn __lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_ceq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cle.d"]
-    fn __lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cle_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cle.s"]
-    fn __lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cle_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.clt.d"]
-    fn __lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_clt_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.clt.s"]
-    fn __lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_clt_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cne.d"]
-    fn __lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cne_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cne.s"]
-    fn __lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cne_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cor.d"]
-    fn __lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cor_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cor.s"]
-    fn __lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cor_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cueq.d"]
-    fn __lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cueq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cueq.s"]
-    fn __lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cueq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cule.d"]
-    fn __lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cule_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cule.s"]
-    fn __lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cule_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cult.d"]
-    fn __lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cult_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cult.s"]
-    fn __lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cult_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cun.d"]
-    fn __lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cun_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cune.d"]
-    fn __lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cune_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cune.s"]
-    fn __lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cune_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cun.s"]
-    fn __lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cun_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.saf.d"]
-    fn __lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_saf_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.saf.s"]
-    fn __lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_saf_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.seq.d"]
-    fn __lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_seq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.seq.s"]
-    fn __lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_seq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sle.d"]
-    fn __lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sle_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sle.s"]
-    fn __lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sle_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.slt.d"]
-    fn __lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_slt_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.slt.s"]
-    fn __lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_slt_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sne.d"]
-    fn __lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sne_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sne.s"]
-    fn __lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sne_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sor.d"]
-    fn __lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sor_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sor.s"]
-    fn __lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sor_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sueq.d"]
-    fn __lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sueq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sueq.s"]
-    fn __lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sueq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sule.d"]
-    fn __lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sule_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sule.s"]
-    fn __lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sule_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sult.d"]
-    fn __lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sult_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sult.s"]
-    fn __lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sult_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sun.d"]
-    fn __lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sun_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sune.d"]
-    fn __lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sune_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sune.s"]
-    fn __lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sune_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sun.s"]
-    fn __lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sun_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vrepli.b"]
-    fn __lsx_vrepli_b(a: i32) -> v16i8;
+    fn __lsx_vrepli_b(a: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vrepli.d"]
-    fn __lsx_vrepli_d(a: i32) -> v2i64;
+    fn __lsx_vrepli_d(a: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vrepli.h"]
-    fn __lsx_vrepli_h(a: i32) -> v8i16;
+    fn __lsx_vrepli_h(a: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vrepli.w"]
-    fn __lsx_vrepli_w(a: i32) -> v4i32;
+    fn __lsx_vrepli_w(a: i32) -> __v4i32;
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsll_b(a, b) }
+pub fn lsx_vsll_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsll_h(a, b) }
+pub fn lsx_vsll_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsll_w(a, b) }
+pub fn lsx_vsll_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsll_d(a, b) }
+pub fn lsx_vsll_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vslli_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vslli_b(a, IMM3) }
+    unsafe { transmute(__lsx_vslli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vslli_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vslli_h(a, IMM4) }
+    unsafe { transmute(__lsx_vslli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vslli_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslli_w(a, IMM5) }
+    unsafe { transmute(__lsx_vslli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vslli_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vslli_d(a, IMM6) }
+    unsafe { transmute(__lsx_vslli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsra_b(a, b) }
+pub fn lsx_vsra_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsra_h(a, b) }
+pub fn lsx_vsra_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsra_w(a, b) }
+pub fn lsx_vsra_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsra_d(a, b) }
+pub fn lsx_vsra_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrai_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrai_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrai_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrai_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrai_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrai_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrai_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrai_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrai_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrai_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrai_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrai_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsrar_b(a, b) }
+pub fn lsx_vsrar_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsrar_h(a, b) }
+pub fn lsx_vsrar_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsrar_w(a, b) }
+pub fn lsx_vsrar_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsrar_d(a, b) }
+pub fn lsx_vsrar_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrari_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrari_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrari_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrari_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrari_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrari_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrari_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrari_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrari_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrari_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrari_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrari_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsrl_b(a, b) }
+pub fn lsx_vsrl_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsrl_h(a, b) }
+pub fn lsx_vsrl_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsrl_w(a, b) }
+pub fn lsx_vsrl_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsrl_d(a, b) }
+pub fn lsx_vsrl_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrli_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrli_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrli_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrli_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrli_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrli_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrli_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrli_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsrlr_b(a, b) }
+pub fn lsx_vsrlr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsrlr_h(a, b) }
+pub fn lsx_vsrlr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsrlr_w(a, b) }
+pub fn lsx_vsrlr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsrlr_d(a, b) }
+pub fn lsx_vsrlr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrlri_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrlri_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrlri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrlri_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrlri_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrlri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrlri_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrlri_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrlri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrlri_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrlri_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrlri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitclr_b(a, b) }
+pub fn lsx_vbitclr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vbitclr_h(a, b) }
+pub fn lsx_vbitclr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vbitclr_w(a, b) }
+pub fn lsx_vbitclr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vbitclr_d(a, b) }
+pub fn lsx_vbitclr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vbitclri_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vbitclri_b(a, IMM3) }
+    unsafe { transmute(__lsx_vbitclri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vbitclri_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vbitclri_h(a, IMM4) }
+    unsafe { transmute(__lsx_vbitclri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vbitclri_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbitclri_w(a, IMM5) }
+    unsafe { transmute(__lsx_vbitclri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vbitclri_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vbitclri_d(a, IMM6) }
+    unsafe { transmute(__lsx_vbitclri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitset_b(a, b) }
+pub fn lsx_vbitset_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vbitset_h(a, b) }
+pub fn lsx_vbitset_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vbitset_w(a, b) }
+pub fn lsx_vbitset_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vbitset_d(a, b) }
+pub fn lsx_vbitset_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vbitseti_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vbitseti_b(a, IMM3) }
+    unsafe { transmute(__lsx_vbitseti_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vbitseti_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vbitseti_h(a, IMM4) }
+    unsafe { transmute(__lsx_vbitseti_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vbitseti_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbitseti_w(a, IMM5) }
+    unsafe { transmute(__lsx_vbitseti_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vbitseti_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vbitseti_d(a, IMM6) }
+    unsafe { transmute(__lsx_vbitseti_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitrev_b(a, b) }
+pub fn lsx_vbitrev_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vbitrev_h(a, b) }
+pub fn lsx_vbitrev_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vbitrev_w(a, b) }
+pub fn lsx_vbitrev_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vbitrev_d(a, b) }
+pub fn lsx_vbitrev_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vbitrevi_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vbitrevi_b(a, IMM3) }
+    unsafe { transmute(__lsx_vbitrevi_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vbitrevi_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vbitrevi_h(a, IMM4) }
+    unsafe { transmute(__lsx_vbitrevi_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vbitrevi_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbitrevi_w(a, IMM5) }
+    unsafe { transmute(__lsx_vbitrevi_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vbitrevi_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vbitrevi_d(a, IMM6) }
+    unsafe { transmute(__lsx_vbitrevi_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vadd_b(a, b) }
+pub fn lsx_vadd_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vadd_h(a, b) }
+pub fn lsx_vadd_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vadd_w(a, b) }
+pub fn lsx_vadd_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vadd_d(a, b) }
+pub fn lsx_vadd_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_bu<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vaddi_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_hu<const IMM5: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vaddi_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_wu<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vaddi_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_du<const IMM5: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vaddi_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_du(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsub_b(a, b) }
+pub fn lsx_vsub_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsub_h(a, b) }
+pub fn lsx_vsub_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsub_w(a, b) }
+pub fn lsx_vsub_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsub_d(a, b) }
+pub fn lsx_vsub_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_bu<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsubi_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_hu<const IMM5: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsubi_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_wu<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsubi_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_du<const IMM5: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsubi_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_du(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmax_b(a, b) }
+pub fn lsx_vmax_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmax_h(a, b) }
+pub fn lsx_vmax_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmax_w(a, b) }
+pub fn lsx_vmax_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmax_d(a, b) }
+pub fn lsx_vmax_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vmaxi_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vmaxi_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vmaxi_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vmaxi_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmax_bu(a, b) }
+pub fn lsx_vmax_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmax_hu(a, b) }
+pub fn lsx_vmax_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmax_wu(a, b) }
+pub fn lsx_vmax_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmax_du(a, b) }
+pub fn lsx_vmax_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_bu<const IMM5: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vmaxi_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_hu<const IMM5: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vmaxi_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vmaxi_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_du<const IMM5: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vmaxi_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_du(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmin_b(a, b) }
+pub fn lsx_vmin_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmin_h(a, b) }
+pub fn lsx_vmin_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmin_w(a, b) }
+pub fn lsx_vmin_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmin_d(a, b) }
+pub fn lsx_vmin_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vmini_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vmini_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vmini_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vmini_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmin_bu(a, b) }
+pub fn lsx_vmin_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmin_hu(a, b) }
+pub fn lsx_vmin_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmin_wu(a, b) }
+pub fn lsx_vmin_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmin_du(a, b) }
+pub fn lsx_vmin_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_bu<const IMM5: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vmini_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_hu<const IMM5: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vmini_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vmini_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_du<const IMM5: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vmini_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_du(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vseq_b(a, b) }
+pub fn lsx_vseq_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vseq_h(a, b) }
+pub fn lsx_vseq_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vseq_w(a, b) }
+pub fn lsx_vseq_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vseq_d(a, b) }
+pub fn lsx_vseq_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vseqi_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vseqi_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vseqi_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vseqi_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vslti_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vslt_b(a, b) }
+pub fn lsx_vslt_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vslt_h(a, b) }
+pub fn lsx_vslt_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vslt_w(a, b) }
+pub fn lsx_vslt_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vslt_d(a, b) }
+pub fn lsx_vslt_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vslti_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vslti_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vslti_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8 {
-    unsafe { __lsx_vslt_bu(a, b) }
+pub fn lsx_vslt_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16 {
-    unsafe { __lsx_vslt_hu(a, b) }
+pub fn lsx_vslt_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32 {
-    unsafe { __lsx_vslt_wu(a, b) }
+pub fn lsx_vslt_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vslt_du(a, b) }
+pub fn lsx_vslt_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_bu<const IMM5: u32>(a: v16u8) -> v16i8 {
+pub fn lsx_vslti_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_hu<const IMM5: u32>(a: v8u16) -> v8i16 {
+pub fn lsx_vslti_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_wu<const IMM5: u32>(a: v4u32) -> v4i32 {
+pub fn lsx_vslti_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_du<const IMM5: u32>(a: v2u64) -> v2i64 {
+pub fn lsx_vslti_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_du(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsle_b(a, b) }
+pub fn lsx_vsle_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsle_h(a, b) }
+pub fn lsx_vsle_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsle_w(a, b) }
+pub fn lsx_vsle_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsle_d(a, b) }
+pub fn lsx_vsle_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vslei_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vslei_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vslei_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vslei_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8 {
-    unsafe { __lsx_vsle_bu(a, b) }
+pub fn lsx_vsle_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16 {
-    unsafe { __lsx_vsle_hu(a, b) }
+pub fn lsx_vsle_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32 {
-    unsafe { __lsx_vsle_wu(a, b) }
+pub fn lsx_vsle_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vsle_du(a, b) }
+pub fn lsx_vsle_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_bu<const IMM5: u32>(a: v16u8) -> v16i8 {
+pub fn lsx_vslei_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_hu<const IMM5: u32>(a: v8u16) -> v8i16 {
+pub fn lsx_vslei_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_wu<const IMM5: u32>(a: v4u32) -> v4i32 {
+pub fn lsx_vslei_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_du<const IMM5: u32>(a: v2u64) -> v2i64 {
+pub fn lsx_vslei_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_du(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsat_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsat_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsat_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsat_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsat_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsat_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsat_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsat_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsat_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsat_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsat_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsat_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_bu<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vsat_bu<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsat_bu(a, IMM3) }
+    unsafe { transmute(__lsx_vsat_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_hu<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vsat_hu<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsat_hu(a, IMM4) }
+    unsafe { transmute(__lsx_vsat_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vsat_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsat_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vsat_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_du<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vsat_du<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsat_du(a, IMM6) }
+    unsafe { transmute(__lsx_vsat_du(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vadda_b(a, b) }
+pub fn lsx_vadda_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vadda_h(a, b) }
+pub fn lsx_vadda_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vadda_w(a, b) }
+pub fn lsx_vadda_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vadda_d(a, b) }
+pub fn lsx_vadda_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsadd_b(a, b) }
+pub fn lsx_vsadd_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsadd_h(a, b) }
+pub fn lsx_vsadd_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsadd_w(a, b) }
+pub fn lsx_vsadd_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsadd_d(a, b) }
+pub fn lsx_vsadd_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vsadd_bu(a, b) }
+pub fn lsx_vsadd_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vsadd_hu(a, b) }
+pub fn lsx_vsadd_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vsadd_wu(a, b) }
+pub fn lsx_vsadd_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vsadd_du(a, b) }
+pub fn lsx_vsadd_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vavg_b(a, b) }
+pub fn lsx_vavg_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vavg_h(a, b) }
+pub fn lsx_vavg_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vavg_w(a, b) }
+pub fn lsx_vavg_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vavg_d(a, b) }
+pub fn lsx_vavg_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vavg_bu(a, b) }
+pub fn lsx_vavg_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vavg_hu(a, b) }
+pub fn lsx_vavg_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vavg_wu(a, b) }
+pub fn lsx_vavg_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vavg_du(a, b) }
+pub fn lsx_vavg_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vavgr_b(a, b) }
+pub fn lsx_vavgr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vavgr_h(a, b) }
+pub fn lsx_vavgr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vavgr_w(a, b) }
+pub fn lsx_vavgr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vavgr_d(a, b) }
+pub fn lsx_vavgr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vavgr_bu(a, b) }
+pub fn lsx_vavgr_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vavgr_hu(a, b) }
+pub fn lsx_vavgr_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vavgr_wu(a, b) }
+pub fn lsx_vavgr_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vavgr_du(a, b) }
+pub fn lsx_vavgr_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vssub_b(a, b) }
+pub fn lsx_vssub_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vssub_h(a, b) }
+pub fn lsx_vssub_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vssub_w(a, b) }
+pub fn lsx_vssub_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vssub_d(a, b) }
+pub fn lsx_vssub_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vssub_bu(a, b) }
+pub fn lsx_vssub_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vssub_hu(a, b) }
+pub fn lsx_vssub_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vssub_wu(a, b) }
+pub fn lsx_vssub_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vssub_du(a, b) }
+pub fn lsx_vssub_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vabsd_b(a, b) }
+pub fn lsx_vabsd_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vabsd_h(a, b) }
+pub fn lsx_vabsd_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vabsd_w(a, b) }
+pub fn lsx_vabsd_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vabsd_d(a, b) }
+pub fn lsx_vabsd_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vabsd_bu(a, b) }
+pub fn lsx_vabsd_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vabsd_hu(a, b) }
+pub fn lsx_vabsd_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vabsd_wu(a, b) }
+pub fn lsx_vabsd_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vabsd_du(a, b) }
+pub fn lsx_vabsd_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmul_b(a, b) }
+pub fn lsx_vmul_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmul_h(a, b) }
+pub fn lsx_vmul_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmul_w(a, b) }
+pub fn lsx_vmul_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmul_d(a, b) }
+pub fn lsx_vmul_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vmadd_b(a, b, c) }
+pub fn lsx_vmadd_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vmadd_h(a, b, c) }
+pub fn lsx_vmadd_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
-    unsafe { __lsx_vmadd_w(a, b, c) }
+pub fn lsx_vmadd_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmadd_d(a, b, c) }
+pub fn lsx_vmadd_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vmsub_b(a, b, c) }
+pub fn lsx_vmsub_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vmsub_h(a, b, c) }
+pub fn lsx_vmsub_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
-    unsafe { __lsx_vmsub_w(a, b, c) }
+pub fn lsx_vmsub_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmsub_d(a, b, c) }
+pub fn lsx_vmsub_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vdiv_b(a, b) }
+pub fn lsx_vdiv_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vdiv_h(a, b) }
+pub fn lsx_vdiv_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vdiv_w(a, b) }
+pub fn lsx_vdiv_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vdiv_d(a, b) }
+pub fn lsx_vdiv_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vdiv_bu(a, b) }
+pub fn lsx_vdiv_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vdiv_hu(a, b) }
+pub fn lsx_vdiv_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vdiv_wu(a, b) }
+pub fn lsx_vdiv_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vdiv_du(a, b) }
+pub fn lsx_vdiv_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vhaddw_h_b(a, b) }
+pub fn lsx_vhaddw_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vhaddw_w_h(a, b) }
+pub fn lsx_vhaddw_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vhaddw_d_w(a, b) }
+pub fn lsx_vhaddw_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16 {
-    unsafe { __lsx_vhaddw_hu_bu(a, b) }
+pub fn lsx_vhaddw_hu_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32 {
-    unsafe { __lsx_vhaddw_wu_hu(a, b) }
+pub fn lsx_vhaddw_wu_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64 {
-    unsafe { __lsx_vhaddw_du_wu(a, b) }
+pub fn lsx_vhaddw_du_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vhsubw_h_b(a, b) }
+pub fn lsx_vhsubw_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vhsubw_w_h(a, b) }
+pub fn lsx_vhsubw_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vhsubw_d_w(a, b) }
+pub fn lsx_vhsubw_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vhsubw_hu_bu(a, b) }
+pub fn lsx_vhsubw_hu_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vhsubw_wu_hu(a, b) }
+pub fn lsx_vhsubw_wu_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vhsubw_du_wu(a, b) }
+pub fn lsx_vhsubw_du_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmod_b(a, b) }
+pub fn lsx_vmod_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmod_h(a, b) }
+pub fn lsx_vmod_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmod_w(a, b) }
+pub fn lsx_vmod_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmod_d(a, b) }
+pub fn lsx_vmod_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmod_bu(a, b) }
+pub fn lsx_vmod_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmod_hu(a, b) }
+pub fn lsx_vmod_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmod_wu(a, b) }
+pub fn lsx_vmod_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmod_du(a, b) }
+pub fn lsx_vmod_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_b(a: v16i8, b: i32) -> v16i8 {
-    unsafe { __lsx_vreplve_b(a, b) }
+pub fn lsx_vreplve_b(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_h(a: v8i16, b: i32) -> v8i16 {
-    unsafe { __lsx_vreplve_h(a, b) }
+pub fn lsx_vreplve_h(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_w(a: v4i32, b: i32) -> v4i32 {
-    unsafe { __lsx_vreplve_w(a, b) }
+pub fn lsx_vreplve_w(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_d(a: v2i64, b: i32) -> v2i64 {
-    unsafe { __lsx_vreplve_d(a, b) }
+pub fn lsx_vreplve_d(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_b<const IMM4: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vreplvei_b<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vreplvei_b(a, IMM4) }
+    unsafe { transmute(__lsx_vreplvei_b(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_h<const IMM3: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vreplvei_h<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vreplvei_h(a, IMM3) }
+    unsafe { transmute(__lsx_vreplvei_h(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_w<const IMM2: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vreplvei_w<const IMM2: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vreplvei_w(a, IMM2) }
+    unsafe { transmute(__lsx_vreplvei_w(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_d<const IMM1: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vreplvei_d<const IMM1: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vreplvei_d(a, IMM1) }
+    unsafe { transmute(__lsx_vreplvei_d(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpickev_b(a, b) }
+pub fn lsx_vpickev_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpickev_h(a, b) }
+pub fn lsx_vpickev_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpickev_w(a, b) }
+pub fn lsx_vpickev_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpickev_d(a, b) }
+pub fn lsx_vpickev_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpickod_b(a, b) }
+pub fn lsx_vpickod_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpickod_h(a, b) }
+pub fn lsx_vpickod_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpickod_w(a, b) }
+pub fn lsx_vpickod_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpickod_d(a, b) }
+pub fn lsx_vpickod_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vilvh_b(a, b) }
+pub fn lsx_vilvh_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vilvh_h(a, b) }
+pub fn lsx_vilvh_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vilvh_w(a, b) }
+pub fn lsx_vilvh_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vilvh_d(a, b) }
+pub fn lsx_vilvh_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vilvl_b(a, b) }
+pub fn lsx_vilvl_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vilvl_h(a, b) }
+pub fn lsx_vilvl_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vilvl_w(a, b) }
+pub fn lsx_vilvl_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vilvl_d(a, b) }
+pub fn lsx_vilvl_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpackev_b(a, b) }
+pub fn lsx_vpackev_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpackev_h(a, b) }
+pub fn lsx_vpackev_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpackev_w(a, b) }
+pub fn lsx_vpackev_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpackev_d(a, b) }
+pub fn lsx_vpackev_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpackod_b(a, b) }
+pub fn lsx_vpackod_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpackod_h(a, b) }
+pub fn lsx_vpackod_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpackod_w(a, b) }
+pub fn lsx_vpackod_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpackod_d(a, b) }
+pub fn lsx_vpackod_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vshuf_h(a, b, c) }
+pub fn lsx_vshuf_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
-    unsafe { __lsx_vshuf_w(a, b, c) }
+pub fn lsx_vshuf_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vshuf_d(a, b, c) }
+pub fn lsx_vshuf_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vand_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vand_v(a, b) }
+pub fn lsx_vand_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vand_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vandi_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vandi_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vandi_b(a, IMM8) }
+    unsafe { transmute(__lsx_vandi_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vor_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vor_v(a, b) }
+pub fn lsx_vor_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vori_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vori_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vori_b(a, IMM8) }
+    unsafe { transmute(__lsx_vori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vnor_v(a, b) }
+pub fn lsx_vnor_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vnor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vnori_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vnori_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vnori_b(a, IMM8) }
+    unsafe { transmute(__lsx_vnori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vxor_v(a, b) }
+pub fn lsx_vxor_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vxor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vxori_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vxori_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vxori_b(a, IMM8) }
+    unsafe { transmute(__lsx_vxori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitsel_v(a, b, c) }
+pub fn lsx_vbitsel_v(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseli_b<const IMM8: u32>(a: v16u8, b: v16u8) -> v16u8 {
+pub fn lsx_vbitseli_b<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vbitseli_b(a, b, IMM8) }
+    unsafe { transmute(__lsx_vbitseli_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_b<const IMM8: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vshuf4i_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_b(a, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_h<const IMM8: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vshuf4i_h<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_h(a, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_h(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_w<const IMM8: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vshuf4i_w<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_w(a, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_w(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_b(a: i32) -> v16i8 {
-    unsafe { __lsx_vreplgr2vr_b(a) }
+pub fn lsx_vreplgr2vr_b(a: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_h(a: i32) -> v8i16 {
-    unsafe { __lsx_vreplgr2vr_h(a) }
+pub fn lsx_vreplgr2vr_h(a: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_w(a: i32) -> v4i32 {
-    unsafe { __lsx_vreplgr2vr_w(a) }
+pub fn lsx_vreplgr2vr_w(a: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_d(a: i64) -> v2i64 {
-    unsafe { __lsx_vreplgr2vr_d(a) }
+pub fn lsx_vreplgr2vr_d(a: i64) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vpcnt_b(a) }
+pub fn lsx_vpcnt_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vpcnt_h(a) }
+pub fn lsx_vpcnt_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vpcnt_w(a) }
+pub fn lsx_vpcnt_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vpcnt_d(a) }
+pub fn lsx_vpcnt_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vclo_b(a) }
+pub fn lsx_vclo_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vclo_h(a) }
+pub fn lsx_vclo_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vclo_w(a) }
+pub fn lsx_vclo_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vclo_d(a) }
+pub fn lsx_vclo_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vclz_b(a) }
+pub fn lsx_vclz_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vclz_h(a) }
+pub fn lsx_vclz_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vclz_w(a) }
+pub fn lsx_vclz_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vclz_d(a) }
+pub fn lsx_vclz_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_b<const IMM4: u32>(a: v16i8) -> i32 {
+pub fn lsx_vpickve2gr_b<const IMM4: u32>(a: m128i) -> i32 {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vpickve2gr_b(a, IMM4) }
+    unsafe { transmute(__lsx_vpickve2gr_b(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_h<const IMM3: u32>(a: v8i16) -> i32 {
+pub fn lsx_vpickve2gr_h<const IMM3: u32>(a: m128i) -> i32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vpickve2gr_h(a, IMM3) }
+    unsafe { transmute(__lsx_vpickve2gr_h(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_w<const IMM2: u32>(a: v4i32) -> i32 {
+pub fn lsx_vpickve2gr_w<const IMM2: u32>(a: m128i) -> i32 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vpickve2gr_w(a, IMM2) }
+    unsafe { transmute(__lsx_vpickve2gr_w(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_d<const IMM1: u32>(a: v2i64) -> i64 {
+pub fn lsx_vpickve2gr_d<const IMM1: u32>(a: m128i) -> i64 {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vpickve2gr_d(a, IMM1) }
+    unsafe { transmute(__lsx_vpickve2gr_d(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_bu<const IMM4: u32>(a: v16i8) -> u32 {
+pub fn lsx_vpickve2gr_bu<const IMM4: u32>(a: m128i) -> u32 {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vpickve2gr_bu(a, IMM4) }
+    unsafe { transmute(__lsx_vpickve2gr_bu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_hu<const IMM3: u32>(a: v8i16) -> u32 {
+pub fn lsx_vpickve2gr_hu<const IMM3: u32>(a: m128i) -> u32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vpickve2gr_hu(a, IMM3) }
+    unsafe { transmute(__lsx_vpickve2gr_hu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_wu<const IMM2: u32>(a: v4i32) -> u32 {
+pub fn lsx_vpickve2gr_wu<const IMM2: u32>(a: m128i) -> u32 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vpickve2gr_wu(a, IMM2) }
+    unsafe { transmute(__lsx_vpickve2gr_wu(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_du<const IMM1: u32>(a: v2i64) -> u64 {
+pub fn lsx_vpickve2gr_du<const IMM1: u32>(a: m128i) -> u64 {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vpickve2gr_du(a, IMM1) }
+    unsafe { transmute(__lsx_vpickve2gr_du(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_b<const IMM4: u32>(a: v16i8, b: i32) -> v16i8 {
+pub fn lsx_vinsgr2vr_b<const IMM4: u32>(a: m128i, b: i32) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vinsgr2vr_b(a, b, IMM4) }
+    unsafe { transmute(__lsx_vinsgr2vr_b(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_h<const IMM3: u32>(a: v8i16, b: i32) -> v8i16 {
+pub fn lsx_vinsgr2vr_h<const IMM3: u32>(a: m128i, b: i32) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vinsgr2vr_h(a, b, IMM3) }
+    unsafe { transmute(__lsx_vinsgr2vr_h(transmute(a), transmute(b), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_w<const IMM2: u32>(a: v4i32, b: i32) -> v4i32 {
+pub fn lsx_vinsgr2vr_w<const IMM2: u32>(a: m128i, b: i32) -> m128i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vinsgr2vr_w(a, b, IMM2) }
+    unsafe { transmute(__lsx_vinsgr2vr_w(transmute(a), transmute(b), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_d<const IMM1: u32>(a: v2i64, b: i64) -> v2i64 {
+pub fn lsx_vinsgr2vr_d<const IMM1: u32>(a: m128i, b: i64) -> m128i {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vinsgr2vr_d(a, b, IMM1) }
+    unsafe { transmute(__lsx_vinsgr2vr_d(transmute(a), transmute(b), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfadd_s(a, b) }
+pub fn lsx_vfadd_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfadd_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfadd_d(a, b) }
+pub fn lsx_vfadd_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfsub_s(a, b) }
+pub fn lsx_vfsub_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfsub_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfsub_d(a, b) }
+pub fn lsx_vfsub_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmul_s(a, b) }
+pub fn lsx_vfmul_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmul_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmul_d(a, b) }
+pub fn lsx_vfmul_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfdiv_s(a, b) }
+pub fn lsx_vfdiv_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfdiv_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfdiv_d(a, b) }
+pub fn lsx_vfdiv_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16 {
-    unsafe { __lsx_vfcvt_h_s(a, b) }
+pub fn lsx_vfcvt_h_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcvt_h_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32 {
-    unsafe { __lsx_vfcvt_s_d(a, b) }
+pub fn lsx_vfcvt_s_d(a: m128d, b: m128d) -> m128 {
+    unsafe { transmute(__lsx_vfcvt_s_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmin_s(a, b) }
+pub fn lsx_vfmin_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmin_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmin_d(a, b) }
+pub fn lsx_vfmin_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmina_s(a, b) }
+pub fn lsx_vfmina_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmina_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmina_d(a, b) }
+pub fn lsx_vfmina_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmina_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmax_s(a, b) }
+pub fn lsx_vfmax_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmax_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmax_d(a, b) }
+pub fn lsx_vfmax_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmaxa_s(a, b) }
+pub fn lsx_vfmaxa_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmaxa_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmaxa_d(a, b) }
+pub fn lsx_vfmaxa_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmaxa_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfclass_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vfclass_s(a) }
+pub fn lsx_vfclass_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vfclass_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfclass_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vfclass_d(a) }
+pub fn lsx_vfclass_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfclass_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsqrt_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfsqrt_s(a) }
+pub fn lsx_vfsqrt_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsqrt_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfsqrt_d(a) }
+pub fn lsx_vfsqrt_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecip_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrecip_s(a) }
+pub fn lsx_vfrecip_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrecip_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecip_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrecip_d(a) }
+pub fn lsx_vfrecip_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrecip_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecipe_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrecipe_s(a) }
+pub fn lsx_vfrecipe_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrecipe_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecipe_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrecipe_d(a) }
+pub fn lsx_vfrecipe_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrecipe_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrte_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrsqrte_s(a) }
+pub fn lsx_vfrsqrte_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrsqrte_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrte_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrsqrte_d(a) }
+pub fn lsx_vfrsqrte_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrsqrte_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrint_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrint_s(a) }
+pub fn lsx_vfrint_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrint_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrint_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrint_d(a) }
+pub fn lsx_vfrint_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrint_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrt_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrsqrt_s(a) }
+pub fn lsx_vfrsqrt_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrt_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrsqrt_d(a) }
+pub fn lsx_vfrsqrt_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vflogb_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vflogb_s(a) }
+pub fn lsx_vflogb_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vflogb_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vflogb_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vflogb_d(a) }
+pub fn lsx_vflogb_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vflogb_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvth_s_h(a: v8i16) -> v4f32 {
-    unsafe { __lsx_vfcvth_s_h(a) }
+pub fn lsx_vfcvth_s_h(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vfcvth_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvth_d_s(a: v4f32) -> v2f64 {
-    unsafe { __lsx_vfcvth_d_s(a) }
+pub fn lsx_vfcvth_d_s(a: m128) -> m128d {
+    unsafe { transmute(__lsx_vfcvth_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvtl_s_h(a: v8i16) -> v4f32 {
-    unsafe { __lsx_vfcvtl_s_h(a) }
+pub fn lsx_vfcvtl_s_h(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vfcvtl_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvtl_d_s(a: v4f32) -> v2f64 {
-    unsafe { __lsx_vfcvtl_d_s(a) }
+pub fn lsx_vfcvtl_d_s(a: m128) -> m128d {
+    unsafe { transmute(__lsx_vfcvtl_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftint_w_s(a) }
+pub fn lsx_vftint_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftint_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftint_l_d(a) }
+pub fn lsx_vftint_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftint_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_wu_s(a: v4f32) -> v4u32 {
-    unsafe { __lsx_vftint_wu_s(a) }
+pub fn lsx_vftint_wu_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftint_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_lu_d(a: v2f64) -> v2u64 {
-    unsafe { __lsx_vftint_lu_d(a) }
+pub fn lsx_vftint_lu_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftint_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrz_w_s(a) }
+pub fn lsx_vftintrz_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrz_l_d(a) }
+pub fn lsx_vftintrz_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_wu_s(a: v4f32) -> v4u32 {
-    unsafe { __lsx_vftintrz_wu_s(a) }
+pub fn lsx_vftintrz_wu_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_lu_d(a: v2f64) -> v2u64 {
-    unsafe { __lsx_vftintrz_lu_d(a) }
+pub fn lsx_vftintrz_lu_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_s_w(a: v4i32) -> v4f32 {
-    unsafe { __lsx_vffint_s_w(a) }
+pub fn lsx_vffint_s_w(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vffint_s_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_d_l(a: v2i64) -> v2f64 {
-    unsafe { __lsx_vffint_d_l(a) }
+pub fn lsx_vffint_d_l(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffint_d_l(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_s_wu(a: v4u32) -> v4f32 {
-    unsafe { __lsx_vffint_s_wu(a) }
+pub fn lsx_vffint_s_wu(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vffint_s_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_d_lu(a: v2u64) -> v2f64 {
-    unsafe { __lsx_vffint_d_lu(a) }
+pub fn lsx_vffint_d_lu(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffint_d_lu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vandn_v(a, b) }
+pub fn lsx_vandn_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vandn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vneg_b(a) }
+pub fn lsx_vneg_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vneg_h(a) }
+pub fn lsx_vneg_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vneg_w(a) }
+pub fn lsx_vneg_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vneg_d(a) }
+pub fn lsx_vneg_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmuh_b(a, b) }
+pub fn lsx_vmuh_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmuh_h(a, b) }
+pub fn lsx_vmuh_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmuh_w(a, b) }
+pub fn lsx_vmuh_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmuh_d(a, b) }
+pub fn lsx_vmuh_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmuh_bu(a, b) }
+pub fn lsx_vmuh_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmuh_hu(a, b) }
+pub fn lsx_vmuh_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmuh_wu(a, b) }
+pub fn lsx_vmuh_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmuh_du(a, b) }
+pub fn lsx_vmuh_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_h_b<const IMM3: u32>(a: v16i8) -> v8i16 {
+pub fn lsx_vsllwil_h_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsllwil_h_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsllwil_h_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_w_h<const IMM4: u32>(a: v8i16) -> v4i32 {
+pub fn lsx_vsllwil_w_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsllwil_w_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsllwil_w_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_d_w<const IMM5: u32>(a: v4i32) -> v2i64 {
+pub fn lsx_vsllwil_d_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsllwil_d_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsllwil_d_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_hu_bu<const IMM3: u32>(a: v16u8) -> v8u16 {
+pub fn lsx_vsllwil_hu_bu<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsllwil_hu_bu(a, IMM3) }
+    unsafe { transmute(__lsx_vsllwil_hu_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_wu_hu<const IMM4: u32>(a: v8u16) -> v4u32 {
+pub fn lsx_vsllwil_wu_hu<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsllwil_wu_hu(a, IMM4) }
+    unsafe { transmute(__lsx_vsllwil_wu_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_du_wu<const IMM5: u32>(a: v4u32) -> v2u64 {
+pub fn lsx_vsllwil_du_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsllwil_du_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vsllwil_du_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsran_b_h(a, b) }
+pub fn lsx_vsran_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsran_h_w(a, b) }
+pub fn lsx_vsran_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsran_w_d(a, b) }
+pub fn lsx_vsran_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssran_b_h(a, b) }
+pub fn lsx_vssran_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssran_h_w(a, b) }
+pub fn lsx_vssran_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssran_w_d(a, b) }
+pub fn lsx_vssran_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssran_bu_h(a, b) }
+pub fn lsx_vssran_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssran_hu_w(a, b) }
+pub fn lsx_vssran_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssran_wu_d(a, b) }
+pub fn lsx_vssran_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsrarn_b_h(a, b) }
+pub fn lsx_vsrarn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsrarn_h_w(a, b) }
+pub fn lsx_vsrarn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsrarn_w_d(a, b) }
+pub fn lsx_vsrarn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssrarn_b_h(a, b) }
+pub fn lsx_vssrarn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssrarn_h_w(a, b) }
+pub fn lsx_vssrarn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssrarn_w_d(a, b) }
+pub fn lsx_vssrarn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssrarn_bu_h(a, b) }
+pub fn lsx_vssrarn_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssrarn_hu_w(a, b) }
+pub fn lsx_vssrarn_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssrarn_wu_d(a, b) }
+pub fn lsx_vssrarn_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsrln_b_h(a, b) }
+pub fn lsx_vsrln_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsrln_h_w(a, b) }
+pub fn lsx_vsrln_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsrln_w_d(a, b) }
+pub fn lsx_vsrln_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssrln_bu_h(a, b) }
+pub fn lsx_vssrln_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssrln_hu_w(a, b) }
+pub fn lsx_vssrln_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssrln_wu_d(a, b) }
+pub fn lsx_vssrln_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsrlrn_b_h(a, b) }
+pub fn lsx_vsrlrn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsrlrn_h_w(a, b) }
+pub fn lsx_vsrlrn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsrlrn_w_d(a, b) }
+pub fn lsx_vsrlrn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssrlrn_bu_h(a, b) }
+pub fn lsx_vssrlrn_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssrlrn_hu_w(a, b) }
+pub fn lsx_vssrlrn_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssrlrn_wu_d(a, b) }
+pub fn lsx_vssrlrn_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstpi_b<const IMM5: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vfrstpi_b<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vfrstpi_b(a, b, IMM5) }
+    unsafe { transmute(__lsx_vfrstpi_b(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstpi_h<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vfrstpi_h<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vfrstpi_h(a, b, IMM5) }
+    unsafe { transmute(__lsx_vfrstpi_h(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vfrstp_b(a, b, c) }
+pub fn lsx_vfrstp_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vfrstp_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vfrstp_h(a, b, c) }
+pub fn lsx_vfrstp_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vfrstp_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_d<const IMM8: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vshuf4i_d<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_d(a, b, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbsrl_v<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vbsrl_v<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbsrl_v(a, IMM5) }
+    unsafe { transmute(__lsx_vbsrl_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbsll_v<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vbsll_v<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbsll_v(a, IMM5) }
+    unsafe { transmute(__lsx_vbsll_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_b<const IMM8: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vextrins_b<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_b(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_h<const IMM8: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vextrins_h<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_h(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_h(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_w<const IMM8: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vextrins_w<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_w(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_d<const IMM8: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vextrins_d<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_d(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vmskltz_b(a) }
+pub fn lsx_vmskltz_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vmskltz_h(a) }
+pub fn lsx_vmskltz_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vmskltz_w(a) }
+pub fn lsx_vmskltz_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vmskltz_d(a) }
+pub fn lsx_vmskltz_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsigncov_b(a, b) }
+pub fn lsx_vsigncov_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsigncov_h(a, b) }
+pub fn lsx_vsigncov_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsigncov_w(a, b) }
+pub fn lsx_vsigncov_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsigncov_d(a, b) }
+pub fn lsx_vsigncov_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmadd_s(a, b, c) }
+pub fn lsx_vfmadd_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmadd_d(a, b, c) }
+pub fn lsx_vfmadd_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmsub_s(a, b, c) }
+pub fn lsx_vfmsub_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmsub_d(a, b, c) }
+pub fn lsx_vfmsub_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfnmadd_s(a, b, c) }
+pub fn lsx_vfnmadd_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfnmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfnmadd_d(a, b, c) }
+pub fn lsx_vfnmadd_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfnmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfnmsub_s(a, b, c) }
+pub fn lsx_vfnmsub_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfnmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfnmsub_d(a, b, c) }
+pub fn lsx_vfnmsub_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfnmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrne_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrne_w_s(a) }
+pub fn lsx_vftintrne_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrne_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrne_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrne_l_d(a) }
+pub fn lsx_vftintrne_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrne_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrp_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrp_w_s(a) }
+pub fn lsx_vftintrp_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrp_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrp_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrp_l_d(a) }
+pub fn lsx_vftintrp_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrp_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrm_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrm_w_s(a) }
+pub fn lsx_vftintrm_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrm_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrm_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrm_l_d(a) }
+pub fn lsx_vftintrm_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrm_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftint_w_d(a, b) }
+pub fn lsx_vftint_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftint_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32 {
-    unsafe { __lsx_vffint_s_l(a, b) }
+pub fn lsx_vffint_s_l(a: m128i, b: m128i) -> m128 {
+    unsafe { transmute(__lsx_vffint_s_l(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrz_w_d(a, b) }
+pub fn lsx_vftintrz_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrp_w_d(a, b) }
+pub fn lsx_vftintrp_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrp_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrm_w_d(a, b) }
+pub fn lsx_vftintrm_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrm_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrne_w_d(a, b) }
+pub fn lsx_vftintrne_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrne_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintl_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintl_l_s(a) }
+pub fn lsx_vftintl_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftinth_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftinth_l_s(a) }
+pub fn lsx_vftinth_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftinth_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffinth_d_w(a: v4i32) -> v2f64 {
-    unsafe { __lsx_vffinth_d_w(a) }
+pub fn lsx_vffinth_d_w(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffinth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffintl_d_w(a: v4i32) -> v2f64 {
-    unsafe { __lsx_vffintl_d_w(a) }
+pub fn lsx_vffintl_d_w(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffintl_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrzl_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrzl_l_s(a) }
+pub fn lsx_vftintrzl_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrzl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrzh_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrzh_l_s(a) }
+pub fn lsx_vftintrzh_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrzh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrpl_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrpl_l_s(a) }
+pub fn lsx_vftintrpl_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrpl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrph_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrph_l_s(a) }
+pub fn lsx_vftintrph_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrph_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrml_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrml_l_s(a) }
+pub fn lsx_vftintrml_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrml_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrmh_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrmh_l_s(a) }
+pub fn lsx_vftintrmh_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrmh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrnel_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrnel_l_s(a) }
+pub fn lsx_vftintrnel_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrnel_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrneh_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrneh_l_s(a) }
+pub fn lsx_vftintrneh_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrneh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrne_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrne_s(a) }
+pub fn lsx_vfrintrne_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrne_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrne_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrne_d(a) }
+pub fn lsx_vfrintrne_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrne_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrz_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrz_s(a) }
+pub fn lsx_vfrintrz_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrz_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrz_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrz_d(a) }
+pub fn lsx_vfrintrz_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrp_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrp_s(a) }
+pub fn lsx_vfrintrp_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrp_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrp_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrp_d(a) }
+pub fn lsx_vfrintrp_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrp_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrm_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrm_s(a) }
+pub fn lsx_vfrintrm_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrm_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrm_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrm_d(a) }
+pub fn lsx_vfrintrm_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrm_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_b<const IMM_S8: i32, const IMM4: u32>(a: v16i8, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_b<const IMM_S8: i32, const IMM4: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM4, 4);
-    __lsx_vstelm_b(a, mem_addr, IMM_S8, IMM4)
+    transmute(__lsx_vstelm_b(transmute(a), mem_addr, IMM_S8, IMM4))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_h<const IMM_S8: i32, const IMM3: u32>(a: v8i16, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_h<const IMM_S8: i32, const IMM3: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM3, 3);
-    __lsx_vstelm_h(a, mem_addr, IMM_S8, IMM3)
+    transmute(__lsx_vstelm_h(transmute(a), mem_addr, IMM_S8, IMM3))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_w<const IMM_S8: i32, const IMM2: u32>(a: v4i32, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_w<const IMM_S8: i32, const IMM2: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM2, 2);
-    __lsx_vstelm_w(a, mem_addr, IMM_S8, IMM2)
+    transmute(__lsx_vstelm_w(transmute(a), mem_addr, IMM_S8, IMM2))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_d<const IMM_S8: i32, const IMM1: u32>(a: v2i64, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_d<const IMM_S8: i32, const IMM1: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM1, 1);
-    __lsx_vstelm_d(a, mem_addr, IMM_S8, IMM1)
+    transmute(__lsx_vstelm_d(transmute(a), mem_addr, IMM_S8, IMM1))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwev_d_w(a, b) }
+pub fn lsx_vaddwev_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwev_w_h(a, b) }
+pub fn lsx_vaddwev_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwev_h_b(a, b) }
+pub fn lsx_vaddwev_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwod_d_w(a, b) }
+pub fn lsx_vaddwod_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwod_w_h(a, b) }
+pub fn lsx_vaddwod_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwod_h_b(a, b) }
+pub fn lsx_vaddwod_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vaddwev_d_wu(a, b) }
+pub fn lsx_vaddwev_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vaddwev_w_hu(a, b) }
+pub fn lsx_vaddwev_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vaddwev_h_bu(a, b) }
+pub fn lsx_vaddwev_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vaddwod_d_wu(a, b) }
+pub fn lsx_vaddwod_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vaddwod_w_hu(a, b) }
+pub fn lsx_vaddwod_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vaddwod_h_bu(a, b) }
+pub fn lsx_vaddwod_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwev_d_wu_w(a, b) }
+pub fn lsx_vaddwev_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwev_w_hu_h(a, b) }
+pub fn lsx_vaddwev_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwev_h_bu_b(a, b) }
+pub fn lsx_vaddwev_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwod_d_wu_w(a, b) }
+pub fn lsx_vaddwod_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwod_w_hu_h(a, b) }
+pub fn lsx_vaddwod_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwod_h_bu_b(a, b) }
+pub fn lsx_vaddwod_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vsubwev_d_w(a, b) }
+pub fn lsx_vsubwev_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vsubwev_w_h(a, b) }
+pub fn lsx_vsubwev_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vsubwev_h_b(a, b) }
+pub fn lsx_vsubwev_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vsubwod_d_w(a, b) }
+pub fn lsx_vsubwod_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vsubwod_w_h(a, b) }
+pub fn lsx_vsubwod_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vsubwod_h_b(a, b) }
+pub fn lsx_vsubwod_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vsubwev_d_wu(a, b) }
+pub fn lsx_vsubwev_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vsubwev_w_hu(a, b) }
+pub fn lsx_vsubwev_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vsubwev_h_bu(a, b) }
+pub fn lsx_vsubwev_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vsubwod_d_wu(a, b) }
+pub fn lsx_vsubwod_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vsubwod_w_hu(a, b) }
+pub fn lsx_vsubwod_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vsubwod_h_bu(a, b) }
+pub fn lsx_vsubwod_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwev_q_d(a, b) }
+pub fn lsx_vaddwev_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwod_q_d(a, b) }
+pub fn lsx_vaddwod_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vaddwev_q_du(a, b) }
+pub fn lsx_vaddwev_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vaddwod_q_du(a, b) }
+pub fn lsx_vaddwod_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsubwev_q_d(a, b) }
+pub fn lsx_vsubwev_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsubwod_q_d(a, b) }
+pub fn lsx_vsubwod_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vsubwev_q_du(a, b) }
+pub fn lsx_vsubwev_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vsubwod_q_du(a, b) }
+pub fn lsx_vsubwod_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwev_q_du_d(a, b) }
+pub fn lsx_vaddwev_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwod_q_du_d(a, b) }
+pub fn lsx_vaddwod_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwev_d_w(a, b) }
+pub fn lsx_vmulwev_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwev_w_h(a, b) }
+pub fn lsx_vmulwev_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwev_h_b(a, b) }
+pub fn lsx_vmulwev_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwod_d_w(a, b) }
+pub fn lsx_vmulwod_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwod_w_h(a, b) }
+pub fn lsx_vmulwod_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwod_h_b(a, b) }
+pub fn lsx_vmulwod_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vmulwev_d_wu(a, b) }
+pub fn lsx_vmulwev_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vmulwev_w_hu(a, b) }
+pub fn lsx_vmulwev_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vmulwev_h_bu(a, b) }
+pub fn lsx_vmulwev_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vmulwod_d_wu(a, b) }
+pub fn lsx_vmulwod_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vmulwod_w_hu(a, b) }
+pub fn lsx_vmulwod_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vmulwod_h_bu(a, b) }
+pub fn lsx_vmulwod_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwev_d_wu_w(a, b) }
+pub fn lsx_vmulwev_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwev_w_hu_h(a, b) }
+pub fn lsx_vmulwev_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwev_h_bu_b(a, b) }
+pub fn lsx_vmulwev_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwod_d_wu_w(a, b) }
+pub fn lsx_vmulwod_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwod_w_hu_h(a, b) }
+pub fn lsx_vmulwod_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwod_h_bu_b(a, b) }
+pub fn lsx_vmulwod_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwev_q_d(a, b) }
+pub fn lsx_vmulwev_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwod_q_d(a, b) }
+pub fn lsx_vmulwod_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vmulwev_q_du(a, b) }
+pub fn lsx_vmulwev_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vmulwod_q_du(a, b) }
+pub fn lsx_vmulwod_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwev_q_du_d(a, b) }
+pub fn lsx_vmulwev_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwod_q_du_d(a, b) }
+pub fn lsx_vmulwod_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vhaddw_q_d(a, b) }
+pub fn lsx_vhaddw_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vhaddw_qu_du(a, b) }
+pub fn lsx_vhaddw_qu_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vhsubw_q_d(a, b) }
+pub fn lsx_vhsubw_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vhsubw_qu_du(a, b) }
+pub fn lsx_vhsubw_qu_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwev_d_w(a, b, c) }
+pub fn lsx_vmaddwev_d_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwev_w_h(a, b, c) }
+pub fn lsx_vmaddwev_w_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwev_h_b(a, b, c) }
+pub fn lsx_vmaddwev_h_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 {
-    unsafe { __lsx_vmaddwev_d_wu(a, b, c) }
+pub fn lsx_vmaddwev_d_wu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 {
-    unsafe { __lsx_vmaddwev_w_hu(a, b, c) }
+pub fn lsx_vmaddwev_w_hu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 {
-    unsafe { __lsx_vmaddwev_h_bu(a, b, c) }
+pub fn lsx_vmaddwev_h_bu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwod_d_w(a, b, c) }
+pub fn lsx_vmaddwod_d_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwod_w_h(a, b, c) }
+pub fn lsx_vmaddwod_w_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwod_h_b(a, b, c) }
+pub fn lsx_vmaddwod_h_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 {
-    unsafe { __lsx_vmaddwod_d_wu(a, b, c) }
+pub fn lsx_vmaddwod_d_wu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 {
-    unsafe { __lsx_vmaddwod_w_hu(a, b, c) }
+pub fn lsx_vmaddwod_w_hu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 {
-    unsafe { __lsx_vmaddwod_h_bu(a, b, c) }
+pub fn lsx_vmaddwod_h_bu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwev_d_wu_w(a, b, c) }
+pub fn lsx_vmaddwev_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwev_w_hu_h(a, b, c) }
+pub fn lsx_vmaddwev_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwev_h_bu_b(a, b, c) }
+pub fn lsx_vmaddwev_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwod_d_wu_w(a, b, c) }
+pub fn lsx_vmaddwod_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwod_w_hu_h(a, b, c) }
+pub fn lsx_vmaddwod_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwod_h_bu_b(a, b, c) }
+pub fn lsx_vmaddwod_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwev_q_d(a, b, c) }
+pub fn lsx_vmaddwev_q_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwod_q_d(a, b, c) }
+pub fn lsx_vmaddwod_q_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
-    unsafe { __lsx_vmaddwev_q_du(a, b, c) }
+pub fn lsx_vmaddwev_q_du(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
-    unsafe { __lsx_vmaddwod_q_du(a, b, c) }
+pub fn lsx_vmaddwod_q_du(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwev_q_du_d(a, b, c) }
+pub fn lsx_vmaddwev_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwod_q_du_d(a, b, c) }
+pub fn lsx_vmaddwod_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vrotr_b(a, b) }
+pub fn lsx_vrotr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vrotr_h(a, b) }
+pub fn lsx_vrotr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vrotr_w(a, b) }
+pub fn lsx_vrotr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vrotr_d(a, b) }
+pub fn lsx_vrotr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vadd_q(a, b) }
+pub fn lsx_vadd_q(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsub_q(a, b) }
+pub fn lsx_vsub_q(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> v16i8 {
+pub unsafe fn lsx_vldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lsx_vldrepl_b(mem_addr, IMM_S12)
+    transmute(__lsx_vldrepl_b(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> v8i16 {
+pub unsafe fn lsx_vldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S11, 11);
-    __lsx_vldrepl_h(mem_addr, IMM_S11)
+    transmute(__lsx_vldrepl_h(mem_addr, IMM_S11))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> v4i32 {
+pub unsafe fn lsx_vldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    __lsx_vldrepl_w(mem_addr, IMM_S10)
+    transmute(__lsx_vldrepl_w(mem_addr, IMM_S10))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> v2i64 {
+pub unsafe fn lsx_vldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S9, 9);
-    __lsx_vldrepl_d(mem_addr, IMM_S9)
+    transmute(__lsx_vldrepl_d(mem_addr, IMM_S9))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskgez_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vmskgez_b(a) }
+pub fn lsx_vmskgez_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskgez_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsknz_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vmsknz_b(a) }
+pub fn lsx_vmsknz_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsknz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_h_b(a: v16i8) -> v8i16 {
-    unsafe { __lsx_vexth_h_b(a) }
+pub fn lsx_vexth_h_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_h_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_w_h(a: v8i16) -> v4i32 {
-    unsafe { __lsx_vexth_w_h(a) }
+pub fn lsx_vexth_w_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_w_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_d_w(a: v4i32) -> v2i64 {
-    unsafe { __lsx_vexth_d_w(a) }
+pub fn lsx_vexth_d_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_q_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vexth_q_d(a) }
+pub fn lsx_vexth_q_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_hu_bu(a: v16u8) -> v8u16 {
-    unsafe { __lsx_vexth_hu_bu(a) }
+pub fn lsx_vexth_hu_bu(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_hu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_wu_hu(a: v8u16) -> v4u32 {
-    unsafe { __lsx_vexth_wu_hu(a) }
+pub fn lsx_vexth_wu_hu(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_wu_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_du_wu(a: v4u32) -> v2u64 {
-    unsafe { __lsx_vexth_du_wu(a) }
+pub fn lsx_vexth_du_wu(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_du_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_qu_du(a: v2u64) -> v2u64 {
-    unsafe { __lsx_vexth_qu_du(a) }
+pub fn lsx_vexth_qu_du(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vrotri_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vrotri_b(a, IMM3) }
+    unsafe { transmute(__lsx_vrotri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vrotri_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vrotri_h(a, IMM4) }
+    unsafe { transmute(__lsx_vrotri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vrotri_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vrotri_w(a, IMM5) }
+    unsafe { transmute(__lsx_vrotri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vrotri_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vrotri_d(a, IMM6) }
+    unsafe { transmute(__lsx_vrotri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextl_q_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vextl_q_d(a) }
+pub fn lsx_vextl_q_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vextl_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrlni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrlni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrlni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrlni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrlrni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrlrni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrlrni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrlrni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrlni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrlni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrlni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrlni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrlni_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrlni_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrlni_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrlni_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrlrni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrlrni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrlrni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrlrni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrlrni_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlrni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlrni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrlrni_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlrni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlrni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrlrni_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlrni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlrni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrlrni_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlrni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlrni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrani_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrani_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrani_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrani_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrarni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrarni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrarni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrarni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrani_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrani_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrani_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrani_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrani_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrani_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrani_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrani_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrani_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrani_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrani_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrani_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrani_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrani_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrani_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrani_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrarni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrarni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrarni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrarni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrarni_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrarni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrarni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrarni_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrarni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrarni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrarni_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrarni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrarni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrarni_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrarni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrarni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpermi_w<const IMM8: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vpermi_w<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vpermi_w(a, b, IMM8) }
+    unsafe { transmute(__lsx_vpermi_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vld<const IMM_S12: i32>(mem_addr: *const i8) -> v16i8 {
+pub unsafe fn lsx_vld<const IMM_S12: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lsx_vld(mem_addr, IMM_S12)
+    transmute(__lsx_vld(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vst<const IMM_S12: i32>(a: v16i8, mem_addr: *mut i8) {
+pub unsafe fn lsx_vst<const IMM_S12: i32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lsx_vst(a, mem_addr, IMM_S12)
+    transmute(__lsx_vst(transmute(a), mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssrlrn_b_h(a, b) }
+pub fn lsx_vssrlrn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssrlrn_h_w(a, b) }
+pub fn lsx_vssrlrn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssrlrn_w_d(a, b) }
+pub fn lsx_vssrlrn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssrln_b_h(a, b) }
+pub fn lsx_vssrln_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssrln_h_w(a, b) }
+pub fn lsx_vssrln_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssrln_w_d(a, b) }
+pub fn lsx_vssrln_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vorn_v(a, b) }
+pub fn lsx_vorn_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vorn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vldi<const IMM_S13: i32>() -> v2i64 {
+pub fn lsx_vldi<const IMM_S13: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S13, 13);
-    unsafe { __lsx_vldi(IMM_S13) }
+    unsafe { transmute(__lsx_vldi(IMM_S13)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vshuf_b(a, b, c) }
+pub fn lsx_vshuf_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> v16i8 {
-    __lsx_vldx(mem_addr, b)
+pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> m128i {
+    transmute(__lsx_vldx(mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstx(a: v16i8, mem_addr: *mut i8, b: i64) {
-    __lsx_vstx(a, mem_addr, b)
+pub unsafe fn lsx_vstx(a: m128i, mem_addr: *mut i8, b: i64) {
+    transmute(__lsx_vstx(transmute(a), mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextl_qu_du(a: v2u64) -> v2u64 {
-    unsafe { __lsx_vextl_qu_du(a) }
+pub fn lsx_vextl_qu_du(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vextl_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_b(a: v16u8) -> i32 {
-    unsafe { __lsx_bnz_b(a) }
+pub fn lsx_bnz_b(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_d(a: v2u64) -> i32 {
-    unsafe { __lsx_bnz_d(a) }
+pub fn lsx_bnz_d(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_h(a: v8u16) -> i32 {
-    unsafe { __lsx_bnz_h(a) }
+pub fn lsx_bnz_h(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_v(a: v16u8) -> i32 {
-    unsafe { __lsx_bnz_v(a) }
+pub fn lsx_bnz_v(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_w(a: v4u32) -> i32 {
-    unsafe { __lsx_bnz_w(a) }
+pub fn lsx_bnz_w(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_b(a: v16u8) -> i32 {
-    unsafe { __lsx_bz_b(a) }
+pub fn lsx_bz_b(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_d(a: v2u64) -> i32 {
-    unsafe { __lsx_bz_d(a) }
+pub fn lsx_bz_d(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_h(a: v8u16) -> i32 {
-    unsafe { __lsx_bz_h(a) }
+pub fn lsx_bz_h(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_v(a: v16u8) -> i32 {
-    unsafe { __lsx_bz_v(a) }
+pub fn lsx_bz_v(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_w(a: v4u32) -> i32 {
-    unsafe { __lsx_bz_w(a) }
+pub fn lsx_bz_w(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_caf_d(a, b) }
+pub fn lsx_vfcmp_caf_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_caf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_caf_s(a, b) }
+pub fn lsx_vfcmp_caf_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_caf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_ceq_d(a, b) }
+pub fn lsx_vfcmp_ceq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_ceq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_ceq_s(a, b) }
+pub fn lsx_vfcmp_ceq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_ceq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cle_d(a, b) }
+pub fn lsx_vfcmp_cle_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cle_s(a, b) }
+pub fn lsx_vfcmp_cle_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_clt_d(a, b) }
+pub fn lsx_vfcmp_clt_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_clt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_clt_s(a, b) }
+pub fn lsx_vfcmp_clt_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_clt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cne_d(a, b) }
+pub fn lsx_vfcmp_cne_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cne_s(a, b) }
+pub fn lsx_vfcmp_cne_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cor_d(a, b) }
+pub fn lsx_vfcmp_cor_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cor_s(a, b) }
+pub fn lsx_vfcmp_cor_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cueq_d(a, b) }
+pub fn lsx_vfcmp_cueq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cueq_s(a, b) }
+pub fn lsx_vfcmp_cueq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cule_d(a, b) }
+pub fn lsx_vfcmp_cule_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cule_s(a, b) }
+pub fn lsx_vfcmp_cule_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cult_d(a, b) }
+pub fn lsx_vfcmp_cult_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cult_s(a, b) }
+pub fn lsx_vfcmp_cult_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cun_d(a, b) }
+pub fn lsx_vfcmp_cun_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cune_d(a, b) }
+pub fn lsx_vfcmp_cune_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cune_s(a, b) }
+pub fn lsx_vfcmp_cune_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cun_s(a, b) }
+pub fn lsx_vfcmp_cun_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_saf_d(a, b) }
+pub fn lsx_vfcmp_saf_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_saf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_saf_s(a, b) }
+pub fn lsx_vfcmp_saf_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_saf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_seq_d(a, b) }
+pub fn lsx_vfcmp_seq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_seq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_seq_s(a, b) }
+pub fn lsx_vfcmp_seq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_seq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sle_d(a, b) }
+pub fn lsx_vfcmp_sle_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sle_s(a, b) }
+pub fn lsx_vfcmp_sle_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_slt_d(a, b) }
+pub fn lsx_vfcmp_slt_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_slt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_slt_s(a, b) }
+pub fn lsx_vfcmp_slt_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_slt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sne_d(a, b) }
+pub fn lsx_vfcmp_sne_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sne_s(a, b) }
+pub fn lsx_vfcmp_sne_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sor_d(a, b) }
+pub fn lsx_vfcmp_sor_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sor_s(a, b) }
+pub fn lsx_vfcmp_sor_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sueq_d(a, b) }
+pub fn lsx_vfcmp_sueq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sueq_s(a, b) }
+pub fn lsx_vfcmp_sueq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sule_d(a, b) }
+pub fn lsx_vfcmp_sule_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sule_s(a, b) }
+pub fn lsx_vfcmp_sule_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sult_d(a, b) }
+pub fn lsx_vfcmp_sult_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sult_s(a, b) }
+pub fn lsx_vfcmp_sult_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sun_d(a, b) }
+pub fn lsx_vfcmp_sun_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sune_d(a, b) }
+pub fn lsx_vfcmp_sune_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sune_s(a, b) }
+pub fn lsx_vfcmp_sune_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sun_s(a, b) }
+pub fn lsx_vfcmp_sun_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_b<const IMM_S10: i32>() -> v16i8 {
+pub fn lsx_vrepli_b<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_b(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_b(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_d<const IMM_S10: i32>() -> v2i64 {
+pub fn lsx_vrepli_d<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_d(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_d(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_h<const IMM_S10: i32>() -> v8i16 {
+pub fn lsx_vrepli_h<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_h(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_h(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_w<const IMM_S10: i32>() -> v4i32 {
+pub fn lsx_vrepli_w<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_w(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_w(IMM_S10)) }
 }
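
With the regeneration above, every LSX wrapper now takes and returns the unified `m128i`/`m128`/`m128d` types. A minimal usage sketch follows (an illustration, not part of this patch: the helper name, its logic, and the feature-gate comment are assumptions; the intrinsic names and signatures are taken from the generated file above).

```rust
// Sketch only: assumes nightly Rust with `#![feature(stdarch_loongarch)]`
// on a loongarch64 target built with the `lsx` target feature.
use core::arch::loongarch64::{lsx_bnz_v, lsx_vfcmp_ceq_s, m128, m128i};

#[target_feature(enable = "lsx")]
fn any_lane_equal(a: m128, b: m128) -> bool {
    // The compare yields a per-lane mask as an `m128i` "bag of bits";
    // `lsx_bnz_v` then reports whether any bit of that mask is set.
    let mask: m128i = lsx_vfcmp_ceq_s(a, b);
    lsx_bnz_v(mask) != 0
}
```
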
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs
index 4097164c2fa..4fb69457174 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs
@@ -1,33 +1,140 @@
 types! {
     #![unstable(feature = "stdarch_loongarch", issue = "117427")]
 
-    /// LOONGARCH-specific 128-bit wide vector of 16 packed `i8`.
-    pub struct v16i8(16 x pub(crate) i8);
+    /// 128-bit wide integer vector type, LoongArch-specific
+    ///
+    /// This type is the same as the `__m128i` type defined in `lsxintrin.h`,
+    /// representing a 128-bit SIMD register. Usage of this type typically
+    /// occurs in conjunction with the `lsx` and higher target features for
+    /// LoongArch.
+    ///
+    /// Internally this type may be viewed as:
+    ///
+    /// * `i8x16` - sixteen `i8` values packed together
+    /// * `i16x8` - eight `i16` values packed together
+    /// * `i32x4` - four `i32` values packed together
+    /// * `i64x2` - two `i64` values packed together
+    ///
+    /// (as well as unsigned versions). Each intrinsic may interpret the
+    /// internal bits differently, check the documentation of the intrinsic
+    /// to see how it's being used.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Note that this means that an instance of `m128i` typically just means
+    /// a "bag of bits" which is left up to interpretation at the point of use.
+    ///
+    /// Most intrinsics using `m128i` are prefixed with `lsx_` and the integer
+    /// types tend to correspond to suffixes like "b", "h", "w" or "d".
+    pub struct m128i(2 x i64);
 
-    /// LOONGARCH-specific 128-bit wide vector of 8 packed `i16`.
-    pub struct v8i16(8 x pub(crate) i16);
+    /// 128-bit wide set of four `f32` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m128` type defined in `lsxintrin.h`,
+    /// representing a 128-bit SIMD register which internally consists of
+    /// four packed `f32` instances. Usage of this type typically occurs in
+    /// conjunction with the `lsx` and higher target features for LoongArch.
+    ///
+    /// Note that unlike `m128i`, the integer version of the 128-bit registers,
+    /// this `m128` type has *one* interpretation. Each instance of `m128`
+    /// corresponds to `f32x4`, or four `f32` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Most intrinsics using `m128` are prefixed with `lsx_` and are suffixed
+    /// with "s".
+    pub struct m128(4 x f32);
 
-    /// LOONGARCH-specific 128-bit wide vector of 4 packed `i32`.
-    pub struct v4i32(4 x pub(crate) i32);
-
-    /// LOONGARCH-specific 128-bit wide vector of 2 packed `i64`.
-    pub struct v2i64(2 x pub(crate) i64);
-
-    /// LOONGARCH-specific 128-bit wide vector of 16 packed `u8`.
-    pub struct v16u8(16 x pub(crate) u8);
-
-    /// LOONGARCH-specific 128-bit wide vector of 8 packed `u16`.
-    pub struct v8u16(8 x pub(crate) u16);
-
-    /// LOONGARCH-specific 128-bit wide vector of 4 packed `u32`.
-    pub struct v4u32(4 x pub(crate) u32);
-
-    /// LOONGARCH-specific 128-bit wide vector of 2 packed `u64`.
-    pub struct v2u64(2 x pub(crate) u64);
+    /// 128-bit wide set of two `f64` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m128d` type defined in `lsxintrin.h`,
+    /// representing a 128-bit SIMD register which internally consists of
+    /// two packed `f64` instances. Usage of this type typically occurs in
+    /// conjunction with the `lsx` and higher target features for LoongArch.
+    ///
+    /// Note that unlike `m128i`, the integer version of the 128-bit registers,
+    /// this `m128d` type has *one* interpretation. Each instance of `m128d`
+    /// always corresponds to `f64x2`, or two `f64` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Most intrinsics using `m128d` are prefixed with `lsx_` and are suffixed
+    /// with "d". Not to be confused with "d" which is used for `m128i`.
+    pub struct m128d(2 x f64);
+}
 
-    /// LOONGARCH-specific 128-bit wide vector of 4 packed `f32`.
-    pub struct v4f32(4 x pub(crate) f32);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16i8([i8; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8i16([i16; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4i32([i32; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v2i64([i64; 2]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16u8([u8; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8u16([u16; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4u32([u32; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v2u64([u64; 2]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4f32([f32; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v2f64([f64; 2]);
 
-    /// LOONGARCH-specific 128-bit wide vector of 2 packed `f64`.
-    pub struct v2f64(2 x pub(crate) f64);
-}
+// These type aliases are provided solely for transitional compatibility.
+// They are temporary and will be removed when appropriate.
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16i8 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8i16 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4i32 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v2i64 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16u8 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8u16 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4u32 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v2u64 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4f32 = m128;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v2f64 = m128d;
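
The behaviour the new doc comments describe can be shown in one place (a sketch of my own, not from the patch: the helper names and the byte/word round-trips are assumptions, and it presumes the transitional aliases are re-exported alongside the new type names).

```rust
use core::arch::loongarch64::{m128i, v4i32};
use core::mem::transmute;

// `m128i` is just 128 bits; the element width those bits "have" is decided
// only by whichever intrinsic eventually consumes the value.
fn from_bytes(bytes: [u8; 16]) -> m128i {
    unsafe { transmute(bytes) }
}

// `v4i32` (like the other `v*` names) is now a plain alias for `m128i`,
// so existing signatures keep compiling unchanged.
fn as_words(v: v4i32) -> [i32; 4] {
    unsafe { transmute(v) }
}
```
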
diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs
index 40132097f5d..5076064ffcd 100644
--- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs
+++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs
@@ -156,6 +156,7 @@ fn gen_bind(in_file: String, ext_name: &str) -> io::Result<()> {
 // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- {in_file}
 // ```
 
+use crate::mem::transmute;
 use super::types::*;
 "#
     ));
@@ -239,38 +240,63 @@ fn gen_bind_body(
     para_num: i32,
     target: TargetFeature,
 ) -> (String, String) {
-    let type_to_rst = |t: &str, s: bool| -> &str {
-        match (t, s) {
-            ("V16QI", _) => "v16i8",
-            ("V32QI", _) => "v32i8",
-            ("V8HI", _) => "v8i16",
-            ("V16HI", _) => "v16i16",
-            ("V4SI", _) => "v4i32",
-            ("V8SI", _) => "v8i32",
-            ("V2DI", _) => "v2i64",
-            ("V4DI", _) => "v4i64",
-            ("UV16QI", _) => "v16u8",
-            ("UV32QI", _) => "v32u8",
-            ("UV8HI", _) => "v8u16",
-            ("UV16HI", _) => "v16u16",
-            ("UV4SI", _) => "v4u32",
-            ("UV8SI", _) => "v8u32",
-            ("UV2DI", _) => "v2u64",
-            ("UV4DI", _) => "v4u64",
-            ("SI", _) => "i32",
-            ("DI", _) => "i64",
-            ("USI", _) => "u32",
-            ("UDI", _) => "u64",
-            ("V4SF", _) => "v4f32",
-            ("V8SF", _) => "v8f32",
-            ("V2DF", _) => "v2f64",
-            ("V4DF", _) => "v4f64",
-            ("UQI", _) => "u32",
-            ("QI", _) => "i32",
-            ("CVPOINTER", false) => "*const i8",
-            ("CVPOINTER", true) => "*mut i8",
-            ("HI", _) => "i32",
-            (_, _) => panic!("unknown type: {t}"),
+    enum TypeKind {
+        Vector,
+        Intrinsic,
+    }
+    use TypeKind::*;
+    let type_to_rst = |t: &str, s: bool, k: TypeKind| -> &str {
+        match (t, s, k) {
+            ("V16QI", _, Vector) => "__v16i8",
+            ("V16QI", _, Intrinsic) => "m128i",
+            ("V32QI", _, Vector) => "__v32i8",
+            ("V32QI", _, Intrinsic) => "m256i",
+            ("V8HI", _, Vector) => "__v8i16",
+            ("V8HI", _, Intrinsic) => "m128i",
+            ("V16HI", _, Vector) => "__v16i16",
+            ("V16HI", _, Intrinsic) => "m256i",
+            ("V4SI", _, Vector) => "__v4i32",
+            ("V4SI", _, Intrinsic) => "m128i",
+            ("V8SI", _, Vector) => "__v8i32",
+            ("V8SI", _, Intrinsic) => "m256i",
+            ("V2DI", _, Vector) => "__v2i64",
+            ("V2DI", _, Intrinsic) => "m128i",
+            ("V4DI", _, Vector) => "__v4i64",
+            ("V4DI", _, Intrinsic) => "m256i",
+            ("UV16QI", _, Vector) => "__v16u8",
+            ("UV16QI", _, Intrinsic) => "m128i",
+            ("UV32QI", _, Vector) => "__v32u8",
+            ("UV32QI", _, Intrinsic) => "m256i",
+            ("UV8HI", _, Vector) => "__v8u16",
+            ("UV8HI", _, Intrinsic) => "m128i",
+            ("UV16HI", _, Vector) => "__v16u16",
+            ("UV16HI", _, Intrinsic) => "m256i",
+            ("UV4SI", _, Vector) => "__v4u32",
+            ("UV4SI", _, Intrinsic) => "m128i",
+            ("UV8SI", _, Vector) => "__v8u32",
+            ("UV8SI", _, Intrinsic) => "m256i",
+            ("UV2DI", _, Vector) => "__v2u64",
+            ("UV2DI", _, Intrinsic) => "m128i",
+            ("UV4DI", _, Vector) => "__v4u64",
+            ("UV4DI", _, Intrinsic) => "m256i",
+            ("SI", _, _) => "i32",
+            ("DI", _, _) => "i64",
+            ("USI", _, _) => "u32",
+            ("UDI", _, _) => "u64",
+            ("V4SF", _, Vector) => "__v4f32",
+            ("V4SF", _, Intrinsic) => "m128",
+            ("V8SF", _, Vector) => "__v8f32",
+            ("V8SF", _, Intrinsic) => "m256",
+            ("V2DF", _, Vector) => "__v2f64",
+            ("V2DF", _, Intrinsic) => "m128d",
+            ("V4DF", _, Vector) => "__v4f64",
+            ("V4DF", _, Intrinsic) => "m256d",
+            ("UQI", _, _) => "u32",
+            ("QI", _, _) => "i32",
+            ("CVPOINTER", false, _) => "*const i8",
+            ("CVPOINTER", true, _) => "*mut i8",
+            ("HI", _, _) => "i32",
+            (_, _, _) => panic!("unknown type: {t}"),
         }
     };
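
For readers following the generator change: the `Vector`/`Intrinsic` split above names the same spec type twice, once for the raw LLVM binding and once for the public wrapper. Roughly, a two-operand `V4SI` entry now comes out in the shape sketched below (all names are placeholders, not an actual LSX intrinsic or spec line).

```rust
#[allow(improper_ctypes)]
unsafe extern "unadjusted" {
    #[link_name = "llvm.loongarch.lsx.vexample.w"] // placeholder link name
    fn __lsx_vexample_w(a: __v4i32, b: __v4i32) -> __v4i32; // Vector mapping
}

#[inline]
#[target_feature(enable = "lsx")]
pub fn lsx_vexample_w(a: m128i, b: m128i) -> m128i { // Intrinsic mapping
    unsafe { transmute(__lsx_vexample_w(transmute(a), transmute(b))) }
}
```
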
 
@@ -281,27 +307,27 @@ fn gen_bind_body(
             let fn_output = if out_t.to_lowercase() == "void" {
                 String::new()
             } else {
-                format!(" -> {}", type_to_rst(out_t, is_store))
+                format!(" -> {}", type_to_rst(out_t, is_store, Vector))
             };
             let fn_inputs = match para_num {
-                1 => format!("(a: {})", type_to_rst(in_t[0], is_store)),
+                1 => format!("(a: {})", type_to_rst(in_t[0], is_store, Vector)),
                 2 => format!(
                     "(a: {}, b: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store)
+                    type_to_rst(in_t[0], is_store, Vector),
+                    type_to_rst(in_t[1], is_store, Vector)
                 ),
                 3 => format!(
                     "(a: {}, b: {}, c: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store)
+                    type_to_rst(in_t[0], is_store, Vector),
+                    type_to_rst(in_t[1], is_store, Vector),
+                    type_to_rst(in_t[2], is_store, Vector)
                 ),
                 4 => format!(
                     "(a: {}, b: {}, c: {}, d: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store),
-                    type_to_rst(in_t[3], is_store)
+                    type_to_rst(in_t[0], is_store, Vector),
+                    type_to_rst(in_t[1], is_store, Vector),
+                    type_to_rst(in_t[2], is_store, Vector),
+                    type_to_rst(in_t[3], is_store, Vector)
                 ),
                 _ => panic!("unsupported parameter number"),
             };
@@ -330,34 +356,40 @@ fn gen_bind_body(
         let fn_output = if out_t.to_lowercase() == "void" {
             String::new()
         } else {
-            format!("-> {} ", type_to_rst(out_t, is_store))
+            format!("-> {} ", type_to_rst(out_t, is_store, Intrinsic))
         };
         let mut fn_inputs = match para_num {
-            1 => format!("(a: {})", type_to_rst(in_t[0], is_store)),
+            1 => format!("(a: {})", type_to_rst(in_t[0], is_store, Intrinsic)),
             2 => format!(
                 "(a: {}, b: {})",
-                type_to_rst(in_t[0], is_store),
-                type_to_rst(in_t[1], is_store)
+                type_to_rst(in_t[0], is_store, Intrinsic),
+                type_to_rst(in_t[1], is_store, Intrinsic)
             ),
             3 => format!(
                 "(a: {}, b: {}, c: {})",
-                type_to_rst(in_t[0], is_store),
-                type_to_rst(in_t[1], is_store),
-                type_to_rst(in_t[2], is_store)
+                type_to_rst(in_t[0], is_store, Intrinsic),
+                type_to_rst(in_t[1], is_store, Intrinsic),
+                type_to_rst(in_t[2], is_store, Intrinsic)
             ),
             4 => format!(
                 "(a: {}, b: {}, c: {}, d: {})",
-                type_to_rst(in_t[0], is_store),
-                type_to_rst(in_t[1], is_store),
-                type_to_rst(in_t[2], is_store),
-                type_to_rst(in_t[3], is_store)
+                type_to_rst(in_t[0], is_store, Intrinsic),
+                type_to_rst(in_t[1], is_store, Intrinsic),
+                type_to_rst(in_t[2], is_store, Intrinsic),
+                type_to_rst(in_t[3], is_store, Intrinsic)
             ),
             _ => panic!("unsupported parameter number"),
         };
         if para_num == 1 && in_t[0] == "HI" {
             fn_inputs = match asm_fmts[1].as_str() {
-                "si13" | "i13" => format!("<const IMM_S13: {}>()", type_to_rst(in_t[0], is_store)),
-                "si10" => format!("<const IMM_S10: {}>()", type_to_rst(in_t[0], is_store)),
+                "si13" | "i13" => format!(
+                    "<const IMM_S13: {}>()",
+                    type_to_rst(in_t[0], is_store, Intrinsic)
+                ),
+                "si10" => format!(
+                    "<const IMM_S10: {}>()",
+                    type_to_rst(in_t[0], is_store, Intrinsic)
+                ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[1]),
             };
             rustc_legacy_const_generics = "rustc_legacy_const_generics(0)";
@@ -365,8 +397,8 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("ui") {
                 format!(
                     "<const IMM{2}: {1}>(a: {0})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -377,8 +409,8 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("si") {
                 format!(
                     "<const IMM_S{2}: {1}>(a: {0})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -389,8 +421,8 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("si") {
                 format!(
                     "<const IMM_S{2}: {1}>(mem_addr: {0})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -401,8 +433,8 @@ fn gen_bind_body(
             fn_inputs = match asm_fmts[2].as_str() {
                 "rk" => format!(
                     "(mem_addr: {}, b: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store)
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic)
                 ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
             };
@@ -410,9 +442,9 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("ui") {
                 format!(
                     "<const IMM{3}: {2}>(a: {0}, b: {1})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -423,9 +455,9 @@ fn gen_bind_body(
             fn_inputs = match asm_fmts[2].as_str() {
                 "si12" => format!(
                     "<const IMM_S12: {2}>(a: {0}, mem_addr: {1})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store)
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic)
                 ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
             };
@@ -434,9 +466,9 @@ fn gen_bind_body(
             fn_inputs = match asm_fmts[2].as_str() {
                 "rk" => format!(
                     "(a: {}, mem_addr: {}, b: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store)
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic)
                 ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
             };
@@ -444,10 +476,10 @@ fn gen_bind_body(
             fn_inputs = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) {
                 ("si8", t) => format!(
                     "<const IMM_S8: {2}, const IMM{4}: {3}>(a: {0}, mem_addr: {1})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store),
-                    type_to_rst(in_t[3], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic),
+                    type_to_rst(in_t[3], is_store, Intrinsic),
                     type_to_imm(t),
                 ),
                 (_, _) => panic!(
@@ -466,10 +498,16 @@ fn gen_bind_body(
     let unsafe_end = if !is_mem { " }" } else { "" };
     let mut call_params = {
         match para_num {
-            1 => format!("{unsafe_start}__{current_name}(a){unsafe_end}"),
-            2 => format!("{unsafe_start}__{current_name}(a, b){unsafe_end}"),
-            3 => format!("{unsafe_start}__{current_name}(a, b, c){unsafe_end}"),
-            4 => format!("{unsafe_start}__{current_name}(a, b, c, d){unsafe_end}"),
+            1 => format!("{unsafe_start}transmute(__{current_name}(transmute(a))){unsafe_end}"),
+            2 => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b))){unsafe_end}"
+            ),
+            3 => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), transmute(c))){unsafe_end}"
+            ),
+            4 => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), transmute(c), transmute(d))){unsafe_end}"
+            ),
             _ => panic!("unsupported parameter number"),
         }
     };
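
With the move to unified SIMD types, the generator no longer passes the wrapper's arguments straight through to the LLVM binding: each vector operand is wrapped in transmute on the way in, and the binding's __-prefixed result is transmuted back to the public type on the way out. As a rough, self-contained sketch of the wrapper shape this template produces (the type and function definitions below are placeholders standing in for the real ones in lasx/types.rs and the link_name bindings, not the actual generated code):

    use core::mem::transmute;

    #[derive(Copy, Clone)]
    #[repr(C)]
    pub struct v32i8([i8; 32]);   // placeholder for the public unified type

    #[derive(Copy, Clone)]
    #[repr(C)]
    struct __v32i8([i8; 32]);     // placeholder for the private FFI type

    // Stand-in for an `llvm.loongarch.lasx.*` binding.
    unsafe fn __lasx_example(a: __v32i8, _b: __v32i8) -> __v32i8 { a }

    // Shape of the emitted two-operand wrapper body (para_num == 2).
    pub fn lasx_example(a: v32i8, b: v32i8) -> v32i8 {
        unsafe { transmute(__lasx_example(transmute(a), transmute(b))) }
    }

The intent is a zero-cost reinterpretation between identically laid-out vector types: only the Rust-visible type changes at the wrapper boundary, not the ABI.
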
@@ -477,12 +515,12 @@ fn gen_bind_body(
         call_params = match asm_fmts[1].as_str() {
             "si10" => {
                 format!(
-                    "static_assert_simm_bits!(IMM_S10, 10);\n    {unsafe_start}__{current_name}(IMM_S10){unsafe_end}"
+                    "static_assert_simm_bits!(IMM_S10, 10);\n    {unsafe_start}transmute(__{current_name}(IMM_S10)){unsafe_end}"
                 )
             }
             "i13" => {
                 format!(
-                    "static_assert_simm_bits!(IMM_S13, 13);\n    {unsafe_start}__{current_name}(IMM_S13){unsafe_end}"
+                    "static_assert_simm_bits!(IMM_S13, 13);\n    {unsafe_start}transmute(__{current_name}(IMM_S13)){unsafe_end}"
                 )
             }
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
@@ -490,7 +528,7 @@ fn gen_bind_body(
     } else if para_num == 2 && (in_t[1] == "UQI" || in_t[1] == "USI") {
         call_params = if asm_fmts[2].starts_with("ui") {
             format!(
-                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}__{current_name}(a, IMM{0}){unsafe_end}",
+                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), IMM{0})){unsafe_end}",
                 asm_fmts[2].get(2..).unwrap()
             )
         } else {
@@ -500,7 +538,7 @@ fn gen_bind_body(
         call_params = match asm_fmts[2].as_str() {
             "si5" => {
                 format!(
-                    "static_assert_simm_bits!(IMM_S5, 5);\n    {unsafe_start}__{current_name}(a, IMM_S5){unsafe_end}"
+                    "static_assert_simm_bits!(IMM_S5, 5);\n    {unsafe_start}transmute(__{current_name}(transmute(a), IMM_S5)){unsafe_end}"
                 )
             }
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
@@ -508,7 +546,7 @@ fn gen_bind_body(
     } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "SI" {
         call_params = if asm_fmts[2].starts_with("si") {
             format!(
-                "static_assert_simm_bits!(IMM_S{0}, {0});\n    {unsafe_start}__{current_name}(mem_addr, IMM_S{0}){unsafe_end}",
+                "static_assert_simm_bits!(IMM_S{0}, {0});\n    {unsafe_start}transmute(__{current_name}(mem_addr, IMM_S{0})){unsafe_end}",
                 asm_fmts[2].get(2..).unwrap()
             )
         } else {
@@ -516,13 +554,15 @@ fn gen_bind_body(
         }
     } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "DI" {
         call_params = match asm_fmts[2].as_str() {
-            "rk" => format!("{unsafe_start}__{current_name}(mem_addr, b){unsafe_end}"),
+            "rk" => format!(
+                "{unsafe_start}transmute(__{current_name}(mem_addr, transmute(b))){unsafe_end}"
+            ),
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
         };
     } else if para_num == 3 && (in_t[2] == "USI" || in_t[2] == "UQI") {
         call_params = if asm_fmts[2].starts_with("ui") {
             format!(
-                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}__{current_name}(a, b, IMM{0}){unsafe_end}",
+                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), IMM{0})){unsafe_end}",
                 asm_fmts[2].get(2..).unwrap()
             )
         } else {
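
For the immediate-operand forms, the template keeps the existing static_assert_*_bits! check and only wraps the vector operands; the const immediate is forwarded untouched. A quick way to see the concrete text the `ui*` branch in the hunk above emits is to evaluate its format! call with sample inputs (the intrinsic name and immediate width below are hypothetical, not taken from the spec):

    // Prints the body the `ui*` branch would emit for a hypothetical
    // three-operand intrinsic, assuming asm_fmts[2] == "ui3" and
    // current_name == "lasx_example_b".
    fn main() {
        let current_name = "lasx_example_b";
        let (unsafe_start, unsafe_end) = ("unsafe { ", " }");
        let body = format!(
            "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), IMM{0})){unsafe_end}",
            "3" // the suffix of "ui3"
        );
        println!("{body}");
    }
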
@@ -531,19 +571,21 @@ fn gen_bind_body(
     } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "SI" {
         call_params = match asm_fmts[2].as_str() {
             "si12" => format!(
-                "static_assert_simm_bits!(IMM_S12, 12);\n    {unsafe_start}__{current_name}(a, mem_addr, IMM_S12){unsafe_end}"
+                "static_assert_simm_bits!(IMM_S12, 12);\n    {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S12)){unsafe_end}"
             ),
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
         };
     } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "DI" {
         call_params = match asm_fmts[2].as_str() {
-            "rk" => format!("{unsafe_start}__{current_name}(a, mem_addr, b){unsafe_end}"),
+            "rk" => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, transmute(b))){unsafe_end}"
+            ),
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
         };
     } else if para_num == 4 {
         call_params = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) {
             ("si8", t) => format!(
-                "static_assert_simm_bits!(IMM_S8, 8);\n    static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}__{current_name}(a, mem_addr, IMM_S8, IMM{0}){unsafe_end}",
+                "static_assert_simm_bits!(IMM_S8, 8);\n    static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S8, IMM{0})){unsafe_end}",
                 type_to_imm(t)
             ),
             (_, _) => panic!(