Diffstat (limited to 'library')
-rw-r--r--  library/alloc/src/collections/btree/map.rs | 4
-rw-r--r--  library/alloc/src/ffi/c_str.rs | 16
-rw-r--r--  library/alloc/src/lib.rs | 1
-rw-r--r--  library/alloc/src/vec/mod.rs | 74
-rw-r--r--  library/compiler-builtins/.github/workflows/main.yaml | 38
-rw-r--r--  library/compiler-builtins/.github/workflows/rustc-pull.yml | 5
-rw-r--r--  library/compiler-builtins/builtins-shim/Cargo.toml | 5
-rw-r--r--  library/compiler-builtins/builtins-test-intrinsics/src/main.rs | 84
-rw-r--r--  library/compiler-builtins/builtins-test/benches/float_conv.rs | 9
-rw-r--r--  library/compiler-builtins/builtins-test/benches/float_extend.rs | 2
-rw-r--r--  library/compiler-builtins/builtins-test/benches/float_trunc.rs | 5
-rw-r--r--  library/compiler-builtins/builtins-test/src/bench.rs | 28
-rw-r--r--  library/compiler-builtins/builtins-test/tests/addsub.rs | 4
-rw-r--r--  library/compiler-builtins/builtins-test/tests/conv.rs | 38
-rw-r--r--  library/compiler-builtins/builtins-test/tests/div_rem.rs | 2
-rw-r--r--  library/compiler-builtins/builtins-test/tests/float_pow.rs | 3
-rw-r--r--  library/compiler-builtins/builtins-test/tests/lse.rs | 2
-rw-r--r--  library/compiler-builtins/builtins-test/tests/mul.rs | 4
-rwxr-xr-x  library/compiler-builtins/ci/ci-util.py | 126
-rw-r--r--  library/compiler-builtins/ci/docker/aarch64-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabi/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/i586-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/i686-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/mips-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/mipsel-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/powerpc-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile | 3
-rw-r--r--  library/compiler-builtins/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/thumbv6m-none-eabi/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/thumbv7em-none-eabi/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/thumbv7em-none-eabihf/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/thumbv7m-none-eabi/Dockerfile | 2
-rw-r--r--  library/compiler-builtins/ci/docker/x86_64-unknown-linux-gnu/Dockerfile | 2
-rwxr-xr-x  library/compiler-builtins/ci/run-docker.sh | 2
-rwxr-xr-x  library/compiler-builtins/ci/run.sh | 9
-rwxr-xr-x  library/compiler-builtins/ci/update-musl.sh | 2
-rw-r--r--  library/compiler-builtins/compiler-builtins/Cargo.toml | 5
-rw-r--r--  library/compiler-builtins/compiler-builtins/build.rs | 7
-rw-r--r--  library/compiler-builtins/compiler-builtins/configure.rs | 34
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/aarch64.rs | 2
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/aarch64_linux.rs | 76
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/arm.rs | 2
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/hexagon.rs | 2
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/lib.rs | 2
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/probestack.rs | 2
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/x86.rs | 10
-rw-r--r--  library/compiler-builtins/compiler-builtins/src/x86_64.rs | 9
-rw-r--r--  library/compiler-builtins/crates/musl-math-sys/src/lib.rs | 2
-rw-r--r--  library/compiler-builtins/crates/symbol-check/Cargo.toml | 3
-rw-r--r--  library/compiler-builtins/libm-test/src/precision.rs | 22
-rw-r--r--  library/compiler-builtins/libm/configure.rs | 30
-rw-r--r--  library/compiler-builtins/libm/src/math/acos.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/acosf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/acosh.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/acoshf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/arch/i586.rs | 85
-rw-r--r--  library/compiler-builtins/libm/src/math/asin.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/asinf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/asinh.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/asinhf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/atan.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/atan2.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/atan2f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/atanf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/atanh.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/atanhf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/cbrt.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/cbrtf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/ceil.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/copysign.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/cos.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/cosf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/cosh.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/coshf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/erf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/erff.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/exp.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/exp10.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/exp10f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/exp2.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/exp2f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/expf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/expm1.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/expm1f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/expo2.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/fabs.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/fdim.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/floor.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/fma.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/fmin_fmax.rs | 16
-rw-r--r--  library/compiler-builtins/libm/src/math/fminimum_fmaximum.rs | 16
-rw-r--r--  library/compiler-builtins/libm/src/math/fminimum_fmaximum_num.rs | 16
-rw-r--r--  library/compiler-builtins/libm/src/math/fmod.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/frexp.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/frexpf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/hypot.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/hypotf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/ilogb.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/ilogbf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/j0.rs | 4
-rw-r--r--  library/compiler-builtins/libm/src/math/j0f.rs | 4
-rw-r--r--  library/compiler-builtins/libm/src/math/j1.rs | 4
-rw-r--r--  library/compiler-builtins/libm/src/math/j1f.rs | 7
-rw-r--r--  library/compiler-builtins/libm/src/math/jn.rs | 4
-rw-r--r--  library/compiler-builtins/libm/src/math/jnf.rs | 4
-rw-r--r--  library/compiler-builtins/libm/src/math/k_cos.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_cosf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_expo2.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_expo2f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_sin.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_sinf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_tan.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/k_tanf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/ldexp.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/lgamma.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/lgamma_r.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/lgammaf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/lgammaf_r.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log10.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log10f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log1p.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log1pf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log2.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/log2f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/logf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/mod.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/modf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/modff.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/nextafter.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/nextafterf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/pow.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/powf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/rem_pio2.rs | 4
-rw-r--r--  library/compiler-builtins/libm/src/math/rem_pio2_large.rs | 12
-rw-r--r--  library/compiler-builtins/libm/src/math/rem_pio2f.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/remainder.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/remainderf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/remquo.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/remquof.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/rint.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/round.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/roundeven.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/scalbn.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/sin.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/sincos.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/sincosf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/sinf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/sinh.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/sinhf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/sqrt.rs | 8
-rw-r--r--  library/compiler-builtins/libm/src/math/support/mod.rs | 3
-rw-r--r--  library/compiler-builtins/libm/src/math/tan.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/tanf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/tanh.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/tanhf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/tgamma.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/tgammaf.rs | 2
-rw-r--r--  library/compiler-builtins/libm/src/math/trunc.rs | 8
-rw-r--r--  library/compiler-builtins/triagebot.toml | 3
-rw-r--r--  library/core/src/alloc/layout.rs | 8
-rw-r--r--  library/core/src/any.rs | 4
-rw-r--r--  library/core/src/array/mod.rs | 4
-rw-r--r--  library/core/src/ascii/ascii_char.rs | 612
-rw-r--r--  library/core/src/cell.rs | 8
-rw-r--r--  library/core/src/char/methods.rs | 6
-rw-r--r--  library/core/src/clone.rs | 35
-rw-r--r--  library/core/src/cmp.rs | 8
-rw-r--r--  library/core/src/cmp/bytewise.rs | 2
-rw-r--r--  library/core/src/convert/mod.rs | 12
-rw-r--r--  library/core/src/ffi/c_str.rs | 2
-rw-r--r--  library/core/src/internal_macros.rs | 38
-rw-r--r--  library/core/src/intrinsics/mod.rs | 42
-rw-r--r--  library/core/src/iter/adapters/map_windows.rs | 2
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 2
-rw-r--r--  library/core/src/lib.rs | 1
-rw-r--r--  library/core/src/net/ip_addr.rs | 47
-rw-r--r--  library/core/src/net/socket_addr.rs | 2
-rw-r--r--  library/core/src/num/int_macros.rs | 86
-rw-r--r--  library/core/src/num/nonzero.rs | 53
-rw-r--r--  library/core/src/num/saturating.rs | 185
-rw-r--r--  library/core/src/num/uint_macros.rs | 99
-rw-r--r--  library/core/src/num/wrapping.rs | 224
-rw-r--r--  library/core/src/ops/arith.rs | 82
-rw-r--r--  library/core/src/ops/bit.rs | 102
-rw-r--r--  library/core/src/ops/control_flow.rs | 147
-rw-r--r--  library/core/src/ops/deref.rs | 2
-rw-r--r--  library/core/src/ops/function.rs | 10
-rw-r--r--  library/core/src/ops/index.rs | 2
-rw-r--r--  library/core/src/ops/range.rs | 2
-rw-r--r--  library/core/src/ops/try_trait.rs | 2
-rw-r--r--  library/core/src/option.rs | 72
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 24
-rw-r--r--  library/core/src/ptr/mod.rs | 16
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 27
-rw-r--r--  library/core/src/ptr/non_null.rs | 32
-rw-r--r--  library/core/src/ptr/unique.rs | 8
-rw-r--r--  library/core/src/result.rs | 82
-rw-r--r--  library/core/src/slice/cmp.rs | 6
-rw-r--r--  library/core/src/slice/index.rs | 4
-rw-r--r--  library/core/src/slice/mod.rs | 10
-rw-r--r--  library/core/src/str/mod.rs | 4
-rw-r--r--  library/core/src/str/traits.rs | 4
-rw-r--r--  library/core/src/sync/atomic.rs | 89
-rw-r--r--  library/core/src/time.rs | 35
-rw-r--r--  library/coretests/tests/char.rs | 2
-rw-r--r--  library/coretests/tests/convert.rs | 2
-rw-r--r--  library/coretests/tests/floats/f128.rs | 290
-rw-r--r--  library/coretests/tests/floats/f16.rs | 294
-rw-r--r--  library/coretests/tests/floats/f32.rs | 298
-rw-r--r--  library/coretests/tests/floats/f64.rs | 296
-rw-r--r--  library/coretests/tests/floats/mod.rs | 459
-rw-r--r--  library/coretests/tests/lib.rs | 4
-rw-r--r--  library/coretests/tests/nonzero.rs | 22
-rw-r--r--  library/coretests/tests/num/const_from.rs | 2
-rw-r--r--  library/coretests/tests/num/int_macros.rs | 12
-rw-r--r--  library/coretests/tests/num/uint_macros.rs | 12
-rw-r--r--  library/coretests/tests/ops/control_flow.rs | 12
-rw-r--r--  library/coretests/tests/option.rs | 2
-rw-r--r--  library/coretests/tests/tuple.rs | 2
-rw-r--r--  library/std/src/collections/hash/map.rs | 4
-rw-r--r--  library/std/src/fs.rs | 11
-rw-r--r--  library/std/src/lib.rs | 8
-rw-r--r--  library/std/src/num/f32.rs | 12
-rw-r--r--  library/std/src/num/f64.rs | 12
-rw-r--r--  library/std/src/panic.rs | 6
-rw-r--r--  library/std/src/panicking.rs | 5
-rw-r--r--  library/std/src/path.rs | 5
-rw-r--r--  library/std/src/sys/args/common.rs | 2
-rw-r--r--  library/std/src/sys/configure_builtins.rs | 22
-rw-r--r--  library/std/src/sys/fs/windows.rs | 17
-rw-r--r--  library/std/src/sys/io/io_slice/uefi.rs | 74
-rw-r--r--  library/std/src/sys/io/mod.rs | 3
-rw-r--r--  library/std/src/sys/mod.rs | 5
-rw-r--r--  library/std/src/sys/pal/hermit/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/hermit/time.rs | 32
-rw-r--r--  library/std/src/sys/pal/itron/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/sgx/thread.rs | 6
-rw-r--r--  library/std/src/sys/pal/sgx/time.rs | 15
-rw-r--r--  library/std/src/sys/pal/solid/time.rs | 9
-rw-r--r--  library/std/src/sys/pal/teeos/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/uefi/tests.rs | 176
-rw-r--r--  library/std/src/sys/pal/uefi/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/uefi/time.rs | 178
-rw-r--r--  library/std/src/sys/pal/unix/stack_overflow.rs | 6
-rw-r--r--  library/std/src/sys/pal/unix/thread.rs | 56
-rw-r--r--  library/std/src/sys/pal/unix/time.rs | 30
-rw-r--r--  library/std/src/sys/pal/unsupported/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/unsupported/time.rs | 15
-rw-r--r--  library/std/src/sys/pal/wasi/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/wasi/time.rs | 25
-rw-r--r--  library/std/src/sys/pal/wasm/atomics/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/windows/c/bindings.txt | 1
-rw-r--r--  library/std/src/sys/pal/windows/c/windows_sys.rs | 1
-rw-r--r--  library/std/src/sys/pal/windows/stack_overflow.rs | 3
-rw-r--r--  library/std/src/sys/pal/windows/thread.rs | 8
-rw-r--r--  library/std/src/sys/pal/windows/time.rs | 30
-rw-r--r--  library/std/src/sys/pal/xous/thread.rs | 4
-rw-r--r--  library/std/src/sys/pal/xous/time.rs | 15
-rw-r--r--  library/std/src/sys/process/windows.rs | 8
-rw-r--r--  library/std/src/thread/current.rs | 13
-rw-r--r--  library/std/src/thread/mod.rs | 2
-rw-r--r--  library/std/src/thread/tests.rs | 7
-rw-r--r--  library/std/src/time.rs | 30
-rw-r--r--  library/std/tests/env.rs | 2
-rw-r--r--  library/std/tests/path.rs | 8
-rw-r--r--  library/stdarch/.github/workflows/rustc-pull.yml | 3
-rw-r--r--  library/stdarch/Cargo.lock | 110
-rw-r--r--  library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs | 4441
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs | 157
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs | 4321
-rw-r--r--  library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs | 159
-rw-r--r--  library/stdarch/crates/core_arch/src/s390x/vector.rs | 14
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse2.rs | 11
-rw-r--r--  library/stdarch/crates/intrinsic-test/Cargo.toml | 3
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/arm/argument.rs | 15
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/arm/compile.rs | 6
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs | 9
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs | 11
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/arm/mod.rs | 130
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/arm/types.rs | 82
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/argument.rs | 78
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/compare.rs | 30
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/compile_c.rs | 15
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/constraint.rs | 13
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/gen_c.rs | 2
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs | 363
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs | 10
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/mod.rs | 8
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/common/write_file.rs | 33
-rw-r--r--  library/stdarch/crates/stdarch-gen-arm/Cargo.toml | 2
-rw-r--r--  library/stdarch/crates/stdarch-gen-loongarch/src/main.rs | 224
-rw-r--r--  library/stdarch/examples/connect5.rs | 6
-rw-r--r--  library/stdarch/rust-version | 2
-rw-r--r--  library/test/src/cli.rs | 17
-rw-r--r--  library/test/src/console.rs | 26
-rw-r--r--  library/test/src/formatters/json.rs | 11
-rw-r--r--  library/test/src/formatters/junit.rs | 10
-rw-r--r--  library/test/src/formatters/mod.rs | 5
-rw-r--r--  library/test/src/formatters/pretty.rs | 10
-rw-r--r--  library/test/src/formatters/terse.rs | 10
-rw-r--r--  library/test/src/lib.rs | 15
309 files changed, 8847 insertions, 7532 deletions
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 17c16e4aaff..c4e599222e5 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -135,6 +135,8 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
 /// ]);
 /// ```
 ///
+/// ## `Entry` API
+///
 /// `BTreeMap` implements an [`Entry API`], which allows for complex
 /// methods of getting, setting, updating and removing keys and their values:
 ///
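A minimal sketch of the Entry API usage the new heading above refers to; the map contents and key names are illustrative, not taken from the patch:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut counts: BTreeMap<&str, u32> = BTreeMap::new();
    // `entry` returns a view of the slot for the key, vacant or occupied,
    // so a default can be inserted and updated with a single lookup.
    *counts.entry("a").or_insert(0) += 1;
    counts.entry("b").and_modify(|c| *c += 1).or_insert(1);
    assert_eq!(counts["a"], 1);
    assert_eq!(counts["b"], 1);
}
```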
@@ -382,6 +384,7 @@ impl<'a, K: 'a, V: 'a> Default for Iter<'a, K, V> {
 /// documentation for more.
 ///
 /// [`iter_mut`]: BTreeMap::iter_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct IterMut<'a, K: 'a, V: 'a> {
     range: LazyLeafRange<marker::ValMut<'a>, K, V>,
@@ -391,7 +394,6 @@ pub struct IterMut<'a, K: 'a, V: 'a> {
     _marker: PhantomData<&'a mut (K, V)>,
 }
 
-#[must_use = "iterators are lazy and do nothing unless consumed"]
 #[stable(feature = "collection_debug", since = "1.17.0")]
 impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
index 93bdad75380..fe6c89a3094 100644
--- a/library/alloc/src/ffi/c_str.rs
+++ b/library/alloc/src/ffi/c_str.rs
@@ -1099,7 +1099,7 @@ impl From<&CStr> for CString {
     }
 }
 
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<CStr> for CString {
     #[inline]
     fn eq(&self, other: &CStr) -> bool {
@@ -1112,7 +1112,7 @@ impl PartialEq<CStr> for CString {
     }
 }
 
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<&CStr> for CString {
     #[inline]
     fn eq(&self, other: &&CStr) -> bool {
@@ -1126,7 +1126,7 @@ impl PartialEq<&CStr> for CString {
 }
 
 #[cfg(not(no_global_oom_handling))]
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<Cow<'_, CStr>> for CString {
     #[inline]
     fn eq(&self, other: &Cow<'_, CStr>) -> bool {
@@ -1221,7 +1221,7 @@ impl CStr {
     }
 }
 
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<CString> for CStr {
     #[inline]
     fn eq(&self, other: &CString) -> bool {
@@ -1235,7 +1235,7 @@ impl PartialEq<CString> for CStr {
 }
 
 #[cfg(not(no_global_oom_handling))]
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<Cow<'_, Self>> for CStr {
     #[inline]
     fn eq(&self, other: &Cow<'_, Self>) -> bool {
@@ -1249,7 +1249,7 @@ impl PartialEq<Cow<'_, Self>> for CStr {
 }
 
 #[cfg(not(no_global_oom_handling))]
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<CStr> for Cow<'_, CStr> {
     #[inline]
     fn eq(&self, other: &CStr) -> bool {
@@ -1263,7 +1263,7 @@ impl PartialEq<CStr> for Cow<'_, CStr> {
 }
 
 #[cfg(not(no_global_oom_handling))]
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<&CStr> for Cow<'_, CStr> {
     #[inline]
     fn eq(&self, other: &&CStr) -> bool {
@@ -1277,7 +1277,7 @@ impl PartialEq<&CStr> for Cow<'_, CStr> {
 }
 
 #[cfg(not(no_global_oom_handling))]
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<CString> for Cow<'_, CStr> {
     #[inline]
     fn eq(&self, other: &CString) -> bool {
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index c091e496c50..639c5d4c930 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -102,6 +102,7 @@
 #![feature(async_iterator)]
 #![feature(bstr)]
 #![feature(bstr_internals)]
+#![feature(cast_maybe_uninit)]
 #![feature(char_internals)]
 #![feature(char_max_len)]
 #![feature(clone_to_uninit)]
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 2321aab2c51..2e40227a058 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -49,7 +49,27 @@
 //! v[1] = v[1] + 5;
 //! ```
 //!
+//! # Memory layout
+//!
+//! When the type is non-zero-sized and the capacity is nonzero, [`Vec`] uses the [`Global`]
+//! allocator for its allocation. It is valid to convert both ways between such a [`Vec`] and a raw
+//! pointer allocated with the [`Global`] allocator, provided that the [`Layout`] used with the
+//! allocator is correct for a sequence of `capacity` elements of the type, and the first `len`
+//! values pointed to by the raw pointer are valid. More precisely, a `ptr: *mut T` that has been
+//! allocated with the [`Global`] allocator with [`Layout::array::<T>(capacity)`][Layout::array] may
+//! be converted into a vec using
+//! [`Vec::<T>::from_raw_parts(ptr, len, capacity)`](Vec::from_raw_parts). Conversely, the memory
+//! backing a `value: *mut T` obtained from [`Vec::<T>::as_mut_ptr`] may be deallocated using the
+//! [`Global`] allocator with the same layout.
+//!
+//! For zero-sized types (ZSTs), or when the capacity is zero, the `Vec` pointer must be non-null
+//! and sufficiently aligned. The recommended way to build a `Vec` of ZSTs if [`vec!`] cannot be
+//! used is to use [`ptr::NonNull::dangling`].
+//!
 //! [`push`]: Vec::push
+//! [`ptr::NonNull::dangling`]: NonNull::dangling
+//! [`Layout`]: crate::alloc::Layout
+//! [Layout::array]: crate::alloc::Layout::array
 
 #![stable(feature = "rust1", since = "1.0.0")]
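A minimal round-trip sketch of the layout guarantee described in the new module docs above (non-zero-sized element type, nonzero capacity); variable names are illustrative:

```rust
fn main() {
    let v = vec![1u32, 2, 3];
    let mut v = std::mem::ManuallyDrop::new(v);
    let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());

    // SAFETY: `ptr` came from a Vec<u32> allocated by the Global allocator with
    // Layout::array::<u32>(cap), and the first `len` elements are initialized.
    let rebuilt = unsafe { Vec::from_raw_parts(ptr, len, cap) };
    assert_eq!(rebuilt, [1, 2, 3]);
}
```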
 
@@ -523,18 +543,23 @@ impl<T> Vec<T> {
     /// This is highly unsafe, due to the number of invariants that aren't
     /// checked:
     ///
-    /// * `ptr` must have been allocated using the global allocator, such as via
-    ///   the [`alloc::alloc`] function.
-    /// * `T` needs to have the same alignment as what `ptr` was allocated with.
+    /// * If `T` is not a zero-sized type and the capacity is nonzero, `ptr` must have
+    ///   been allocated using the global allocator, such as via the [`alloc::alloc`]
+    ///   function. If `T` is a zero-sized type or the capacity is zero, `ptr` need
+    ///   only be non-null and aligned.
+    /// * `T` needs to have the same alignment as what `ptr` was allocated with,
+    ///   if the pointer is required to be allocated.
     ///   (`T` having a less strict alignment is not sufficient, the alignment really
     ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
     ///   allocated and deallocated with the same layout.)
-    /// * The size of `T` times the `capacity` (ie. the allocated size in bytes) needs
-    ///   to be the same size as the pointer was allocated with. (Because similar to
-    ///   alignment, [`dealloc`] must be called with the same layout `size`.)
+    /// * The size of `T` times the `capacity` (ie. the allocated size in bytes), if
+    ///   nonzero, needs to be the same size as the pointer was allocated with.
+    ///   (Because similar to alignment, [`dealloc`] must be called with the same
+    ///   layout `size`.)
     /// * `length` needs to be less than or equal to `capacity`.
     /// * The first `length` values must be properly initialized values of type `T`.
-    /// * `capacity` needs to be the capacity that the pointer was allocated with.
+    /// * `capacity` needs to be the capacity that the pointer was allocated with,
+    ///   if the pointer is required to be allocated.
     /// * The allocated size in bytes must be no larger than `isize::MAX`.
     ///   See the safety documentation of [`pointer::offset`].
     ///
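A sketch of the zero-sized-type case added above: with a ZST (or zero capacity) no allocation is involved, so a non-null, well-aligned dangling pointer satisfies the relaxed requirements. Illustrative only:

```rust
use std::ptr::NonNull;

fn main() {
    // `()` is zero-sized, so the Vec never touches the allocator; a dangling
    // pointer is enough, as the revised safety docs state.
    let ptr: *mut () = NonNull::<()>::dangling().as_ptr();
    let v: Vec<()> = unsafe { Vec::from_raw_parts(ptr, 3, 3) };
    assert_eq!(v.len(), 3);
}
```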
@@ -770,12 +795,16 @@ impl<T> Vec<T> {
     /// order as the arguments to [`from_raw_parts`].
     ///
     /// After calling this function, the caller is responsible for the
-    /// memory previously managed by the `Vec`. The only way to do
-    /// this is to convert the raw pointer, length, and capacity back
-    /// into a `Vec` with the [`from_raw_parts`] function, allowing
-    /// the destructor to perform the cleanup.
+    /// memory previously managed by the `Vec`. Most often, one does
+    /// this by converting the raw pointer, length, and capacity back
+    /// into a `Vec` with the [`from_raw_parts`] function; more generally,
+    /// if `T` is non-zero-sized and the capacity is nonzero, one may use
+    /// any method that calls [`dealloc`] with a layout of
+    /// `Layout::array::<T>(capacity)`; if `T` is zero-sized or the
+    /// capacity is zero, nothing needs to be done.
     ///
     /// [`from_raw_parts`]: Vec::from_raw_parts
+    /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
     ///
     /// # Examples
     ///
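A sketch of the "any method that calls `dealloc` with `Layout::array::<T>(capacity)`" route described above, assuming a non-zero-sized element type and nonzero capacity:

```rust
use std::alloc::{dealloc, Layout};
use std::mem::ManuallyDrop;

fn main() {
    let mut v = ManuallyDrop::new(vec![1u64, 2, 3]);
    let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());

    unsafe {
        // Drop the `len` initialized elements (a no-op for u64, shown for form),
        // then hand the allocation back with the same layout the Vec used.
        std::ptr::drop_in_place(std::ptr::slice_from_raw_parts_mut(ptr, len));
        dealloc(ptr.cast::<u8>(), Layout::array::<u64>(cap).unwrap());
    }
}
```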
@@ -1755,6 +1784,12 @@ impl<T, A: Allocator> Vec<T, A> {
     /// may still invalidate this pointer.
     /// See the second example below for how this guarantee can be used.
     ///
+    /// The method also guarantees that, as long as `T` is not zero-sized and the capacity is
+    /// nonzero, the pointer may be passed into [`dealloc`] with a layout of
+    /// `Layout::array::<T>(capacity)` in order to deallocate the backing memory. If this is done,
+    /// be careful not to run the destructor of the `Vec`, as dropping it will result in
+    /// double-frees. Wrapping the `Vec` in a [`ManuallyDrop`] is the typical way to achieve this.
+    ///
     /// # Examples
     ///
     /// ```
@@ -1787,9 +1822,24 @@ impl<T, A: Allocator> Vec<T, A> {
     /// }
     /// ```
     ///
+    /// Deallocating a vector using [`Box`] (which uses [`dealloc`] internally):
+    ///
+    /// ```
+    /// use std::mem::{ManuallyDrop, MaybeUninit};
+    ///
+    /// let mut v = ManuallyDrop::new(vec![0, 1, 2]);
+    /// let ptr = v.as_mut_ptr();
+    /// let capacity = v.capacity();
+    /// let slice_ptr: *mut [MaybeUninit<i32>] =
+    ///     std::ptr::slice_from_raw_parts_mut(ptr.cast(), capacity);
+    /// drop(unsafe { Box::from_raw(slice_ptr) });
+    /// ```
+    ///
     /// [`as_mut_ptr`]: Vec::as_mut_ptr
     /// [`as_ptr`]: Vec::as_ptr
     /// [`as_non_null`]: Vec::as_non_null
+    /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+    /// [`ManuallyDrop`]: core::mem::ManuallyDrop
     #[stable(feature = "vec_as_ptr", since = "1.37.0")]
     #[rustc_const_stable(feature = "const_vec_string_slice", since = "1.87.0")]
     #[rustc_never_returns_null_ptr]
@@ -3126,7 +3176,7 @@ impl<T, A: Allocator> Vec<T, A> {
         // - but the allocation extends out to `self.buf.capacity()` elements, possibly
         // uninitialized
         let spare_ptr = unsafe { ptr.add(self.len) };
-        let spare_ptr = spare_ptr.cast::<MaybeUninit<T>>();
+        let spare_ptr = spare_ptr.cast_uninit();
         let spare_len = self.buf.capacity() - self.len;
 
         // SAFETY:
diff --git a/library/compiler-builtins/.github/workflows/main.yaml b/library/compiler-builtins/.github/workflows/main.yaml
index 541c99c828d..c54df2e90b7 100644
--- a/library/compiler-builtins/.github/workflows/main.yaml
+++ b/library/compiler-builtins/.github/workflows/main.yaml
@@ -34,7 +34,9 @@ jobs:
       - name: Fetch pull request ref
         run: git fetch origin "$GITHUB_REF:$GITHUB_REF"
         if: github.event_name == 'pull_request'
-      - run: python3 ci/ci-util.py generate-matrix >> "$GITHUB_OUTPUT"
+      - run: |
+          set -eo pipefail # Needed to actually fail the job if ci-util fails
+          python3 ci/ci-util.py generate-matrix | tee "$GITHUB_OUTPUT"
         id: script
 
   test:
@@ -50,7 +52,6 @@ jobs:
           os: ubuntu-24.04-arm
         - target: aarch64-pc-windows-msvc
           os: windows-2025
-          test_verbatim: 1
           build_only: 1
         - target: arm-unknown-linux-gnueabi
           os: ubuntu-24.04
@@ -70,8 +71,12 @@ jobs:
           os: ubuntu-24.04
         - target: powerpc64le-unknown-linux-gnu
           os: ubuntu-24.04
+        - target: powerpc64le-unknown-linux-gnu
+          os: ubuntu-24.04-ppc64le
         - target: riscv64gc-unknown-linux-gnu
           os: ubuntu-24.04
+        - target: s390x-unknown-linux-gnu
+          os: ubuntu-24.04-s390x
         - target: thumbv6m-none-eabi
           os: ubuntu-24.04
         - target: thumbv7em-none-eabi
@@ -88,10 +93,8 @@ jobs:
           os: macos-13
         - target: i686-pc-windows-msvc
           os: windows-2025
-          test_verbatim: 1
         - target: x86_64-pc-windows-msvc
           os: windows-2025
-          test_verbatim: 1
         - target: i686-pc-windows-gnu
           os: windows-2025
           channel: nightly-i686-gnu
@@ -102,11 +105,23 @@ jobs:
     needs: [calculate_vars]
     env:
       BUILD_ONLY: ${{ matrix.build_only }}
-      TEST_VERBATIM: ${{ matrix.test_verbatim }}
       MAY_SKIP_LIBM_CI: ${{ needs.calculate_vars.outputs.may_skip_libm_ci }}
     steps:
+    - name: Print $HOME
+      shell: bash
+      run: |
+        set -x
+        echo "${HOME:-not found}"
+        pwd
+        printenv
     - name: Print runner information
       run: uname -a
+
+    # Native ppc and s390x runners don't have rustup by default
+    - name: Install rustup
+      if: matrix.os == 'ubuntu-24.04-ppc64le' || matrix.os == 'ubuntu-24.04-s390x'
+      run: sudo apt-get update && sudo apt-get install -y rustup
+
     - uses: actions/checkout@v4
     - name: Install Rust (rustup)
       shell: bash
@@ -117,7 +132,12 @@ jobs:
         rustup update "$channel" --no-self-update
         rustup default "$channel"
         rustup target add "${{ matrix.target }}"
+
+    # Our scripts use nextest if possible. This is skipped on the native ppc
+    # and s390x runners since install-action doesn't support them.
     - uses: taiki-e/install-action@nextest
+      if: "!(matrix.os == 'ubuntu-24.04-ppc64le' || matrix.os == 'ubuntu-24.04-s390x')"
+
     - uses: Swatinem/rust-cache@v2
       with:
         key: ${{ matrix.target }}
@@ -144,7 +164,7 @@ jobs:
       shell: bash
     - run: echo "RUST_COMPILER_RT_ROOT=$(realpath ./compiler-rt)" >> "$GITHUB_ENV"
       shell: bash
-      
+
     - name: Download musl source
       run: ./ci/update-musl.sh
       shell: bash
@@ -256,7 +276,7 @@ jobs:
       with:
         name: ${{ env.BASELINE_NAME }}
         path: ${{ env.BASELINE_NAME }}.tar.xz
-    
+
     - name: Run wall time benchmarks
       run: |
         # Always use the same seed for benchmarks. Ideally we should switch to a
@@ -311,8 +331,8 @@ jobs:
     timeout-minutes: 10
     steps:
     - uses: actions/checkout@v4
-    - name: Install stable `rustfmt`
-      run: rustup set profile minimal && rustup default stable && rustup component add rustfmt
+    - name: Install nightly `rustfmt`
+      run: rustup set profile minimal && rustup default nightly && rustup component add rustfmt
     - run: cargo fmt -- --check
 
   extensive:
diff --git a/library/compiler-builtins/.github/workflows/rustc-pull.yml b/library/compiler-builtins/.github/workflows/rustc-pull.yml
index ba698492e42..ad7693e17b0 100644
--- a/library/compiler-builtins/.github/workflows/rustc-pull.yml
+++ b/library/compiler-builtins/.github/workflows/rustc-pull.yml
@@ -12,12 +12,13 @@ jobs:
     if: github.repository == 'rust-lang/compiler-builtins'
     uses: rust-lang/josh-sync/.github/workflows/rustc-pull.yml@main
     with:
+      github-app-id: ${{ vars.APP_CLIENT_ID }}
       # https://rust-lang.zulipchat.com/#narrow/channel/219381-t-libs/topic/compiler-builtins.20subtree.20sync.20automation/with/528482375
       zulip-stream-id: 219381
       zulip-topic: 'compiler-builtins subtree sync automation'
-      zulip-bot-email:  "compiler-builtins-ci-bot@rust-lang.zulipchat.com"
+      zulip-bot-email: "compiler-builtins-ci-bot@rust-lang.zulipchat.com"
       pr-base-branch: master
       branch-name: rustc-pull
     secrets:
       zulip-api-token: ${{ secrets.ZULIP_API_TOKEN }}
-      token: ${{ secrets.GITHUB_TOKEN }}
+      github-app-secret: ${{ secrets.APP_PRIVATE_KEY }}
diff --git a/library/compiler-builtins/builtins-shim/Cargo.toml b/library/compiler-builtins/builtins-shim/Cargo.toml
index 8eb880c6fd1..707ebdbc77b 100644
--- a/library/compiler-builtins/builtins-shim/Cargo.toml
+++ b/library/compiler-builtins/builtins-shim/Cargo.toml
@@ -37,8 +37,9 @@ default = ["compiler-builtins"]
 # implementations and also filling in unimplemented intrinsics
 c = ["dep:cc"]
 
-# Workaround for the Cranelift codegen backend. Disables any implementations
-# which use inline assembly and fall back to pure Rust versions (if available).
+# For implementations where there is both a generic version and a platform-
+# specific version, use the generic version. This is meant to enable testing
+# the generic versions on all platforms.
 no-asm = []
 
 # Workaround for codegen backends which haven't yet implemented `f16` and
diff --git a/library/compiler-builtins/builtins-test-intrinsics/src/main.rs b/library/compiler-builtins/builtins-test-intrinsics/src/main.rs
index 66744a0817f..b9d19ea7725 100644
--- a/library/compiler-builtins/builtins-test-intrinsics/src/main.rs
+++ b/library/compiler-builtins/builtins-test-intrinsics/src/main.rs
@@ -40,11 +40,7 @@ mod intrinsics {
         x as f64
     }
 
-    #[cfg(all(
-        f16_enabled,
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(all(f16_enabled, f128_enabled))]
     pub fn extendhftf(x: f16) -> f128 {
         x as f128
     }
@@ -201,11 +197,7 @@ mod intrinsics {
 
     /* f128 operations */
 
-    #[cfg(all(
-        f16_enabled,
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(all(f16_enabled, f128_enabled))]
     pub fn trunctfhf(x: f128) -> f16 {
         x as f16
     }
@@ -220,50 +212,32 @@ mod intrinsics {
         x as f64
     }
 
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     pub fn fixtfsi(x: f128) -> i32 {
         x as i32
     }
 
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     pub fn fixtfdi(x: f128) -> i64 {
         x as i64
     }
 
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     pub fn fixtfti(x: f128) -> i128 {
         x as i128
     }
 
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     pub fn fixunstfsi(x: f128) -> u32 {
         x as u32
     }
 
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     pub fn fixunstfdi(x: f128) -> u64 {
         x as u64
     }
 
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     pub fn fixunstfti(x: f128) -> u128 {
         x as u128
     }
@@ -540,47 +514,25 @@ fn run() {
     bb(extendhfdf(bb(2.)));
     #[cfg(f16_enabled)]
     bb(extendhfsf(bb(2.)));
-    #[cfg(all(
-        f16_enabled,
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(all(f16_enabled, f128_enabled))]
     bb(extendhftf(bb(2.)));
     #[cfg(f128_enabled)]
     bb(extendsftf(bb(2.)));
     bb(fixdfti(bb(2.)));
     bb(fixsfti(bb(2.)));
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     bb(fixtfdi(bb(2.)));
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     bb(fixtfsi(bb(2.)));
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     bb(fixtfti(bb(2.)));
     bb(fixunsdfti(bb(2.)));
     bb(fixunssfti(bb(2.)));
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     bb(fixunstfdi(bb(2.)));
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     bb(fixunstfsi(bb(2.)));
-    #[cfg(all(
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(f128_enabled)]
     bb(fixunstfti(bb(2.)));
     #[cfg(f128_enabled)]
     bb(floatditf(bb(2)));
@@ -616,11 +568,7 @@ fn run() {
     bb(truncsfhf(bb(2.)));
     #[cfg(f128_enabled)]
     bb(trunctfdf(bb(2.)));
-    #[cfg(all(
-        f16_enabled,
-        f128_enabled,
-        not(any(target_arch = "powerpc", target_arch = "powerpc64"))
-    ))]
+    #[cfg(all(f16_enabled, f128_enabled))]
     bb(trunctfhf(bb(2.)));
     #[cfg(f128_enabled)]
     bb(trunctfsf(bb(2.)));
diff --git a/library/compiler-builtins/builtins-test/benches/float_conv.rs b/library/compiler-builtins/builtins-test/benches/float_conv.rs
index d4a7346d1d5..e0f488eb685 100644
--- a/library/compiler-builtins/builtins-test/benches/float_conv.rs
+++ b/library/compiler-builtins/builtins-test/benches/float_conv.rs
@@ -365,7 +365,6 @@ float_bench! {
 
 /* float -> unsigned int */
 
-#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
 float_bench! {
     name: conv_f32_u32,
     sig: (a: f32) -> u32,
@@ -387,7 +386,6 @@ float_bench! {
     ],
 }
 
-#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
 float_bench! {
     name: conv_f32_u64,
     sig: (a: f32) -> u64,
@@ -409,7 +407,6 @@ float_bench! {
     ],
 }
 
-#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
 float_bench! {
     name: conv_f32_u128,
     sig: (a: f32) -> u128,
@@ -505,7 +502,6 @@ float_bench! {
 
 /* float -> signed int */
 
-#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
 float_bench! {
     name: conv_f32_i32,
     sig: (a: f32) -> i32,
@@ -527,7 +523,6 @@ float_bench! {
     ],
 }
 
-#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
 float_bench! {
     name: conv_f32_i64,
     sig: (a: f32) -> i64,
@@ -549,7 +544,6 @@ float_bench! {
     ],
 }
 
-#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
 float_bench! {
     name: conv_f32_i128,
     sig: (a: f32) -> i128,
@@ -666,9 +660,6 @@ pub fn float_conv() {
     conv_f64_i128(&mut criterion);
 
     #[cfg(f128_enabled)]
-    // FIXME: ppc64le has a sporadic overflow panic in the crate functions
-    // <https://github.com/rust-lang/compiler-builtins/issues/617#issuecomment-2125914639>
-    #[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
     {
         conv_u32_f128(&mut criterion);
         conv_u64_f128(&mut criterion);
diff --git a/library/compiler-builtins/builtins-test/benches/float_extend.rs b/library/compiler-builtins/builtins-test/benches/float_extend.rs
index fc44e80c9e1..939dc60f95f 100644
--- a/library/compiler-builtins/builtins-test/benches/float_extend.rs
+++ b/library/compiler-builtins/builtins-test/benches/float_extend.rs
@@ -110,9 +110,7 @@ float_bench! {
 pub fn float_extend() {
     let mut criterion = Criterion::default().configure_from_args();
 
-    // FIXME(#655): `f16` tests disabled until we can bootstrap symbols
     #[cfg(f16_enabled)]
-    #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
     {
         extend_f16_f32(&mut criterion);
         extend_f16_f64(&mut criterion);
diff --git a/library/compiler-builtins/builtins-test/benches/float_trunc.rs b/library/compiler-builtins/builtins-test/benches/float_trunc.rs
index 43310c7cfc8..9373f945bb2 100644
--- a/library/compiler-builtins/builtins-test/benches/float_trunc.rs
+++ b/library/compiler-builtins/builtins-test/benches/float_trunc.rs
@@ -121,9 +121,7 @@ float_bench! {
 pub fn float_trunc() {
     let mut criterion = Criterion::default().configure_from_args();
 
-    // FIXME(#655): `f16` tests disabled until we can bootstrap symbols
     #[cfg(f16_enabled)]
-    #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
     {
         trunc_f32_f16(&mut criterion);
         trunc_f64_f16(&mut criterion);
@@ -133,11 +131,8 @@ pub fn float_trunc() {
 
     #[cfg(f128_enabled)]
     {
-        // FIXME(#655): `f16` tests disabled until we can bootstrap symbols
         #[cfg(f16_enabled)]
-        #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
         trunc_f128_f16(&mut criterion);
-
         trunc_f128_f32(&mut criterion);
         trunc_f128_f64(&mut criterion);
     }
diff --git a/library/compiler-builtins/builtins-test/src/bench.rs b/library/compiler-builtins/builtins-test/src/bench.rs
index 0987185670e..4bdcf482cd6 100644
--- a/library/compiler-builtins/builtins-test/src/bench.rs
+++ b/library/compiler-builtins/builtins-test/src/bench.rs
@@ -17,28 +17,14 @@ pub fn skip_sys_checks(test_name: &str) -> bool {
         "extend_f16_f32",
         "trunc_f32_f16",
         "trunc_f64_f16",
-        // FIXME(#616): re-enable once fix is in nightly
-        // <https://github.com/rust-lang/compiler-builtins/issues/616>
-        "mul_f32",
-        "mul_f64",
     ];
 
-    // FIXME(f16_f128): error on LE ppc64. There are more tests that are cfg-ed out completely
-    // in their benchmark modules due to runtime panics.
-    // <https://github.com/rust-lang/compiler-builtins/issues/617#issuecomment-2125914639>
-    const PPC64LE_SKIPPED: &[&str] = &["extend_f32_f128"];
-
     // FIXME(f16_f128): system symbols have incorrect results
     // <https://github.com/rust-lang/compiler-builtins/issues/617#issuecomment-2125914639>
     const X86_NO_SSE_SKIPPED: &[&str] = &[
         "add_f128", "sub_f128", "mul_f128", "div_f128", "powi_f32", "powi_f64",
     ];
 
-    // FIXME(f16_f128): Wide multiply carry bug in `compiler-rt`, re-enable when nightly no longer
-    // uses `compiler-rt` version.
-    // <https://github.com/llvm/llvm-project/issues/91840>
-    const AARCH64_SKIPPED: &[&str] = &["mul_f128", "div_f128"];
-
     // FIXME(llvm): system symbols have incorrect results on Windows
     // <https://github.com/rust-lang/compiler-builtins/issues/617#issuecomment-2121359807>
     const WINDOWS_SKIPPED: &[&str] = &[
@@ -57,19 +43,7 @@ pub fn skip_sys_checks(test_name: &str) -> bool {
         return true;
     }
 
-    if cfg!(all(target_arch = "powerpc64", target_endian = "little"))
-        && PPC64LE_SKIPPED.contains(&test_name)
-    {
-        return true;
-    }
-
-    if cfg!(all(target_arch = "x86", not(target_feature = "sse")))
-        && X86_NO_SSE_SKIPPED.contains(&test_name)
-    {
-        return true;
-    }
-
-    if cfg!(target_arch = "aarch64") && AARCH64_SKIPPED.contains(&test_name) {
+    if cfg!(x86_no_sse) && X86_NO_SSE_SKIPPED.contains(&test_name) {
         return true;
     }
 
diff --git a/library/compiler-builtins/builtins-test/tests/addsub.rs b/library/compiler-builtins/builtins-test/tests/addsub.rs
index 865b9e472ab..abe7dde645e 100644
--- a/library/compiler-builtins/builtins-test/tests/addsub.rs
+++ b/library/compiler-builtins/builtins-test/tests/addsub.rs
@@ -111,7 +111,7 @@ macro_rules! float_sum {
     }
 }
 
-#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
+#[cfg(not(x86_no_sse))]
 mod float_addsub {
     use super::*;
 
@@ -122,7 +122,7 @@ mod float_addsub {
 }
 
 #[cfg(f128_enabled)]
-#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
+#[cfg(not(x86_no_sse))]
 #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
 mod float_addsub_f128 {
     use super::*;
diff --git a/library/compiler-builtins/builtins-test/tests/conv.rs b/library/compiler-builtins/builtins-test/tests/conv.rs
index 7d729364fae..9b04295d2ef 100644
--- a/library/compiler-builtins/builtins-test/tests/conv.rs
+++ b/library/compiler-builtins/builtins-test/tests/conv.rs
@@ -59,32 +59,28 @@ mod i_to_f {
                                 || ((error_minus == error || error_plus == error)
                                     && ((f0.to_bits() & 1) != 0))
                             {
-                                if !cfg!(any(
-                                    target_arch = "powerpc",
-                                    target_arch = "powerpc64"
-                                )) {
-                                    panic!(
-                                        "incorrect rounding by {}({}): {}, ({}, {}, {}), errors ({}, {}, {})",
-                                        stringify!($fn),
-                                        x,
-                                        f1.to_bits(),
-                                        y_minus_ulp,
-                                        y,
-                                        y_plus_ulp,
-                                        error_minus,
-                                        error,
-                                        error_plus,
-                                    );
-                                }
+                                panic!(
+                                    "incorrect rounding by {}({}): {}, ({}, {}, {}), errors ({}, {}, {})",
+                                    stringify!($fn),
+                                    x,
+                                    f1.to_bits(),
+                                    y_minus_ulp,
+                                    y,
+                                    y_plus_ulp,
+                                    error_minus,
+                                    error,
+                                    error_plus,
+                                );
                             }
                         }
 
-                        // Test against native conversion. We disable testing on all `x86` because of
-                        // rounding bugs with `i686`. `powerpc` also has the same rounding bug.
+                        // Test against native conversion.
+                        // FIXME(x86,ppc): the platform version has rounding bugs on i686 and
+                        // PowerPC64le (for PPC this only shows up in Docker, not the native runner).
+                        // https://github.com/rust-lang/compiler-builtins/pull/384#issuecomment-740413334
                         if !Float::eq_repr(f0, f1) && !cfg!(any(
                             target_arch = "x86",
-                            target_arch = "powerpc",
-                            target_arch = "powerpc64"
+                            all(target_arch = "powerpc64", target_endian = "little")
                         )) {
                             panic!(
                                 "{}({}): std: {:?}, builtins: {:?}",
diff --git a/library/compiler-builtins/builtins-test/tests/div_rem.rs b/library/compiler-builtins/builtins-test/tests/div_rem.rs
index e8327f9b4b8..caee4166c99 100644
--- a/library/compiler-builtins/builtins-test/tests/div_rem.rs
+++ b/library/compiler-builtins/builtins-test/tests/div_rem.rs
@@ -138,7 +138,7 @@ macro_rules! float {
     };
 }
 
-#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
+#[cfg(not(x86_no_sse))]
 mod float_div {
     use super::*;
 
diff --git a/library/compiler-builtins/builtins-test/tests/float_pow.rs b/library/compiler-builtins/builtins-test/tests/float_pow.rs
index 0e8ae88e83e..a17dff27c10 100644
--- a/library/compiler-builtins/builtins-test/tests/float_pow.rs
+++ b/library/compiler-builtins/builtins-test/tests/float_pow.rs
@@ -1,7 +1,7 @@
 #![allow(unused_macros)]
 #![cfg_attr(f128_enabled, feature(f128))]
-#![cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
 
+#[cfg_attr(x86_no_sse, allow(unused))]
 use builtins_test::*;
 
 // This is approximate because of issues related to
@@ -52,6 +52,7 @@ macro_rules! pow {
     };
 }
 
+#[cfg(not(x86_no_sse))] // FIXME(i586): failure for powidf2
 pow! {
     f32, 1e-4, __powisf2, all();
     f64, 1e-12, __powidf2, all();
diff --git a/library/compiler-builtins/builtins-test/tests/lse.rs b/library/compiler-builtins/builtins-test/tests/lse.rs
index 0d85228d7a2..5d59fbb7f44 100644
--- a/library/compiler-builtins/builtins-test/tests/lse.rs
+++ b/library/compiler-builtins/builtins-test/tests/lse.rs
@@ -1,6 +1,6 @@
 #![feature(decl_macro)] // so we can use pub(super)
 #![feature(macro_metavar_expr_concat)]
-#![cfg(all(target_arch = "aarch64", target_os = "linux", not(feature = "no-asm")))]
+#![cfg(all(target_arch = "aarch64", target_os = "linux"))]
 
 /// Translate a byte size to a Rust type.
 macro int_ty {
diff --git a/library/compiler-builtins/builtins-test/tests/mul.rs b/library/compiler-builtins/builtins-test/tests/mul.rs
index 58bc9ab4ac9..3072b45dca0 100644
--- a/library/compiler-builtins/builtins-test/tests/mul.rs
+++ b/library/compiler-builtins/builtins-test/tests/mul.rs
@@ -113,7 +113,7 @@ macro_rules! float_mul {
     };
 }
 
-#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
+#[cfg(not(x86_no_sse))]
 mod float_mul {
     use super::*;
 
@@ -126,7 +126,7 @@ mod float_mul {
 }
 
 #[cfg(f128_enabled)]
-#[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))]
+#[cfg(not(x86_no_sse))]
 #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
 mod float_mul_f128 {
     use super::*;
diff --git a/library/compiler-builtins/ci/ci-util.py b/library/compiler-builtins/ci/ci-util.py
index 3437d304f48..c1db17c6c90 100755
--- a/library/compiler-builtins/ci/ci-util.py
+++ b/library/compiler-builtins/ci/ci-util.py
@@ -7,10 +7,12 @@ git history.
 
 import json
 import os
+import pprint
 import re
 import subprocess as sp
 import sys
 from dataclasses import dataclass
+from functools import cache
 from glob import glob
 from inspect import cleandoc
 from os import getenv
@@ -50,15 +52,6 @@ GIT = ["git", "-C", REPO_ROOT]
 DEFAULT_BRANCH = "master"
 WORKFLOW_NAME = "CI"  # Workflow that generates the benchmark artifacts
 ARTIFACT_PREFIX = "baseline-icount*"
-# Place this in a PR body to skip regression checks (must be at the start of a line).
-REGRESSION_DIRECTIVE = "ci: allow-regressions"
-# Place this in a PR body to skip extensive tests
-SKIP_EXTENSIVE_DIRECTIVE = "ci: skip-extensive"
-# Place this in a PR body to allow running a large number of extensive tests. If not
-# set, this script will error out if a threshold is exceeded in order to avoid
-# accidentally spending huge amounts of CI time.
-ALLOW_MANY_EXTENSIVE_DIRECTIVE = "ci: allow-many-extensive"
-MANY_EXTENSIVE_THRESHOLD = 20
 
 # Don't run exhaustive tests if these files change, even if they contain a function
 # definition.
@@ -70,7 +63,7 @@ IGNORE_FILES = [
 
 # libm PR CI takes a long time and doesn't need to run unless relevant files have been
 # changed. Anything matching this regex pattern will trigger a run.
-TRIGGER_LIBM_PR_CI = ".*(libm|musl).*"
+TRIGGER_LIBM_CI_FILE_PAT = ".*(libm|musl).*"
 
 TYPES = ["f16", "f32", "f64", "f128"]
 
@@ -80,6 +73,54 @@ def eprint(*args, **kwargs):
     print(*args, file=sys.stderr, **kwargs)
 
 
+@dataclass(init=False)
+class PrCfg:
+    """Directives that we allow in the commit body to control test behavior.
+
+    These are of the form `ci: foo`, at the start of a line.
+    """
+
+    # Skip regression checks (must be at the start of a line).
+    allow_regressions: bool = False
+    # Don't run extensive tests
+    skip_extensive: bool = False
+
+    # Allow running a large number of extensive tests. If not set, this script
+    # will error out if a threshold is exceeded in order to avoid accidentally
+    # spending huge amounts of CI time.
+    allow_many_extensive: bool = False
+
+    # Max number of extensive tests to run by default
+    MANY_EXTENSIVE_THRESHOLD: int = 20
+
+    # Run tests for `libm` that may otherwise be skipped due to no changed files.
+    always_test_libm: bool = False
+
+    # String values of directive names
+    DIR_ALLOW_REGRESSIONS: str = "allow-regressions"
+    DIR_SKIP_EXTENSIVE: str = "skip-extensive"
+    DIR_ALLOW_MANY_EXTENSIVE: str = "allow-many-extensive"
+    DIR_TEST_LIBM: str = "test-libm"
+
+    def __init__(self, body: str):
+        directives = re.finditer(r"^\s*ci:\s*(?P<dir_name>\S*)", body, re.MULTILINE)
+        for dir in directives:
+            name = dir.group("dir_name")
+            if name == self.DIR_ALLOW_REGRESSIONS:
+                self.allow_regressions = True
+            elif name == self.DIR_SKIP_EXTENSIVE:
+                self.skip_extensive = True
+            elif name == self.DIR_ALLOW_MANY_EXTENSIVE:
+                self.allow_many_extensive = True
+            elif name == self.DIR_TEST_LIBM:
+                self.always_test_libm = True
+            else:
+                eprint(f"Found unexpected directive `{name}`")
+                exit(1)
+
+        pprint.pp(self)
+
+
 @dataclass
 class PrInfo:
     """GitHub response for PR query"""
@@ -88,10 +129,21 @@ class PrInfo:
     commits: list[str]
     created_at: str
     number: int
+    cfg: PrCfg
 
     @classmethod
-    def load(cls, pr_number: int | str) -> Self:
-        """For a given PR number, query the body and commit list"""
+    def from_env(cls) -> Self | None:
+        """Create a PR object from the PR_NUMBER environment if set, `None` otherwise."""
+        pr_env = os.environ.get("PR_NUMBER")
+        if pr_env is not None and len(pr_env) > 0:
+            return cls.from_pr(pr_env)
+
+        return None
+
+    @classmethod
+    @cache  # Cache so we don't print info messages multiple times
+    def from_pr(cls, pr_number: int | str) -> Self:
+        """For a given PR number, query the body and commit list."""
         pr_info = sp.check_output(
             [
                 "gh",
@@ -104,13 +156,9 @@ class PrInfo:
             ],
             text=True,
         )
-        eprint("PR info:", json.dumps(pr_info, indent=4))
-        return cls(**json.loads(pr_info))
-
-    def contains_directive(self, directive: str) -> bool:
-        """Return true if the provided directive is on a line in the PR body"""
-        lines = self.body.splitlines()
-        return any(line.startswith(directive) for line in lines)
+        pr_json = json.loads(pr_info)
+        eprint("PR info:", json.dumps(pr_json, indent=4))
+        return cls(**json.loads(pr_info), cfg=PrCfg(pr_json["body"]))
 
 
 class FunctionDef(TypedDict):
@@ -207,26 +255,32 @@ class Context:
         """If this is a PR and no libm files were changed, allow skipping libm
         jobs."""
 
-        if self.is_pr():
-            return all(not re.match(TRIGGER_LIBM_PR_CI, str(f)) for f in self.changed)
+        # Always run on merge CI
+        if not self.is_pr():
+            return False
 
-        return False
+        pr = PrInfo.from_env()
+        assert pr is not None, "Is a PR but couldn't load PrInfo"
+
+        # Allow opting in to libm tests
+        if pr.cfg.always_test_libm:
+            return False
+
+        # By default, only allow skipping if no changed files match the pattern
+        return all(not re.match(TRIGGER_LIBM_CI_FILE_PAT, str(f)) for f in self.changed)
 
     def emit_workflow_output(self):
         """Create a JSON object a list items for each type's changed files, if any
         did change, and the routines that were affected by the change.
         """
 
-        pr_number = os.environ.get("PR_NUMBER")
         skip_tests = False
         error_on_many_tests = False
 
-        if pr_number is not None and len(pr_number) > 0:
-            pr = PrInfo.load(pr_number)
-            skip_tests = pr.contains_directive(SKIP_EXTENSIVE_DIRECTIVE)
-            error_on_many_tests = not pr.contains_directive(
-                ALLOW_MANY_EXTENSIVE_DIRECTIVE
-            )
+        pr = PrInfo.from_env()
+        if pr is not None:
+            skip_tests = pr.cfg.skip_extensive
+            error_on_many_tests = not pr.cfg.allow_many_extensive
 
             if skip_tests:
                 eprint("Skipping all extensive tests")
@@ -253,16 +307,14 @@ class Context:
         may_skip = str(self.may_skip_libm_ci()).lower()
         print(f"extensive_matrix={ext_matrix}")
         print(f"may_skip_libm_ci={may_skip}")
-        eprint(f"extensive_matrix={ext_matrix}")
-        eprint(f"may_skip_libm_ci={may_skip}")
         eprint(f"total extensive tests: {total_to_test}")
 
-        if error_on_many_tests and total_to_test > MANY_EXTENSIVE_THRESHOLD:
+        if error_on_many_tests and total_to_test > PrCfg.MANY_EXTENSIVE_THRESHOLD:
             eprint(
-                f"More than {MANY_EXTENSIVE_THRESHOLD} tests would be run; add"
-                f" `{ALLOW_MANY_EXTENSIVE_DIRECTIVE}` to the PR body if this is"
+                f"More than {PrCfg.MANY_EXTENSIVE_THRESHOLD} tests would be run; add"
+                f" `{PrCfg.DIR_ALLOW_MANY_EXTENSIVE}` to the PR body if this is"
                 " intentional. If this is refactoring that happens to touch a lot of"
-                f" files, `{SKIP_EXTENSIVE_DIRECTIVE}` can be used instead."
+                f" files, `{PrCfg.DIR_SKIP_EXTENSIVE}` can be used instead."
             )
             exit(1)
 
@@ -371,8 +423,8 @@ def handle_bench_regressions(args: list[str]):
             eprint(USAGE)
             exit(1)
 
-    pr = PrInfo.load(pr_number)
-    if pr.contains_directive(REGRESSION_DIRECTIVE):
+    pr = PrInfo.from_pr(pr_number)
+    if pr.cfg.allow_regressions:
         eprint("PR allows regressions")
         return
 
diff --git a/library/compiler-builtins/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
index df71804ba23..69b99f5b6b3 100644
--- a/library/compiler-builtins/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabi/Dockerfile b/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabi/Dockerfile
index 38ad1a13623..2fa6f852052 100644
--- a/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabi/Dockerfile
+++ b/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabi/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile b/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile
index ffead05d5f2..85f7335f5a8 100644
--- a/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile
+++ b/library/compiler-builtins/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile b/library/compiler-builtins/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile
index 9ab49e46ee3..42511479f36 100644
--- a/library/compiler-builtins/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile
+++ b/library/compiler-builtins/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/i586-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/i586-unknown-linux-gnu/Dockerfile
index d12ced3257f..35488c47749 100644
--- a/library/compiler-builtins/ci/docker/i586-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/i586-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/i686-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/i686-unknown-linux-gnu/Dockerfile
index d12ced3257f..35488c47749 100644
--- a/library/compiler-builtins/ci/docker/i686-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/i686-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile
index 62b43da9e70..e95a1b9163f 100644
--- a/library/compiler-builtins/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/mips-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/mips-unknown-linux-gnu/Dockerfile
index c02a9467234..fd187760310 100644
--- a/library/compiler-builtins/ci/docker/mips-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/mips-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile b/library/compiler-builtins/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile
index 6d8b96069be..4e542ce6858 100644
--- a/library/compiler-builtins/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile
+++ b/library/compiler-builtins/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile b/library/compiler-builtins/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile
index 7e6ac7c3b8a..528dfd8940d 100644
--- a/library/compiler-builtins/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile
+++ b/library/compiler-builtins/ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/mipsel-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/mipsel-unknown-linux-gnu/Dockerfile
index 9feadc7b5ce..2572180238e 100644
--- a/library/compiler-builtins/ci/docker/mipsel-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/mipsel-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/powerpc-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/powerpc-unknown-linux-gnu/Dockerfile
index 84dcaf47ed5..cac1f23610a 100644
--- a/library/compiler-builtins/ci/docker/powerpc-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/powerpc-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile
index b90fd5ec545..76127b7dbb8 100644
--- a/library/compiler-builtins/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile
index e6d1d1cd0b5..da1d56ca66f 100644
--- a/library/compiler-builtins/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
@@ -12,6 +12,5 @@ ENV CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
     CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_RUNNER=qemu-ppc64le-static \
     AR_powerpc64le_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
     CC_powerpc64le_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
-    QEMU_CPU=POWER8 \
     QEMU_LD_PREFIX=/usr/powerpc64le-linux-gnu \
     RUST_TEST_THREADS=1
diff --git a/library/compiler-builtins/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
index eeb4ed0193e..513efacd6d9 100644
--- a/library/compiler-builtins/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/riscv64gc-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/thumbv6m-none-eabi/Dockerfile b/library/compiler-builtins/ci/docker/thumbv6m-none-eabi/Dockerfile
index ad0d4351ea6..a9a172a2113 100644
--- a/library/compiler-builtins/ci/docker/thumbv6m-none-eabi/Dockerfile
+++ b/library/compiler-builtins/ci/docker/thumbv6m-none-eabi/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/thumbv7em-none-eabi/Dockerfile b/library/compiler-builtins/ci/docker/thumbv7em-none-eabi/Dockerfile
index ad0d4351ea6..a9a172a2113 100644
--- a/library/compiler-builtins/ci/docker/thumbv7em-none-eabi/Dockerfile
+++ b/library/compiler-builtins/ci/docker/thumbv7em-none-eabi/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/thumbv7em-none-eabihf/Dockerfile b/library/compiler-builtins/ci/docker/thumbv7em-none-eabihf/Dockerfile
index ad0d4351ea6..a9a172a2113 100644
--- a/library/compiler-builtins/ci/docker/thumbv7em-none-eabihf/Dockerfile
+++ b/library/compiler-builtins/ci/docker/thumbv7em-none-eabihf/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/thumbv7m-none-eabi/Dockerfile b/library/compiler-builtins/ci/docker/thumbv7m-none-eabi/Dockerfile
index ad0d4351ea6..a9a172a2113 100644
--- a/library/compiler-builtins/ci/docker/thumbv7m-none-eabi/Dockerfile
+++ b/library/compiler-builtins/ci/docker/thumbv7m-none-eabi/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/docker/x86_64-unknown-linux-gnu/Dockerfile b/library/compiler-builtins/ci/docker/x86_64-unknown-linux-gnu/Dockerfile
index c590adcddf6..2ef800129d6 100644
--- a/library/compiler-builtins/ci/docker/x86_64-unknown-linux-gnu/Dockerfile
+++ b/library/compiler-builtins/ci/docker/x86_64-unknown-linux-gnu/Dockerfile
@@ -1,4 +1,4 @@
-ARG IMAGE=ubuntu:24.04
+ARG IMAGE=ubuntu:25.04
 FROM $IMAGE
 
 RUN apt-get update && \
diff --git a/library/compiler-builtins/ci/run-docker.sh b/library/compiler-builtins/ci/run-docker.sh
index d0122dee5c8..4c1fe0fe264 100755
--- a/library/compiler-builtins/ci/run-docker.sh
+++ b/library/compiler-builtins/ci/run-docker.sh
@@ -97,7 +97,7 @@ if [ "${1:-}" = "--help" ] || [ "$#" -gt 1 ]; then
     usage: ./ci/run-docker.sh [target]
 
     you can also set DOCKER_BASE_IMAGE to use something other than the default
-    ubuntu:24.04 (or rustlang/rust:nightly).
+    ubuntu:25.04 (or rustlang/rust:nightly).
     "
     exit
 fi
diff --git a/library/compiler-builtins/ci/run.sh b/library/compiler-builtins/ci/run.sh
index 8b7965bb205..bc94d42fe83 100755
--- a/library/compiler-builtins/ci/run.sh
+++ b/library/compiler-builtins/ci/run.sh
@@ -41,7 +41,10 @@ else
     "${test_builtins[@]}" --benches
     "${test_builtins[@]}" --benches --release
 
-    if [ "${TEST_VERBATIM:-}" = "1" ]; then
+    # Validate that having a verbatim path for the target directory works
+    # (trivial to regress by joining artifact paths with `/` rather than
+    # `Path::join`). MinGW does not currently support these paths.
+    if [[ "$target" = *"windows"* ]] && [[ "$target" != *"gnu"* ]]; then
         verb_path=$(cmd.exe //C echo \\\\?\\%cd%\\builtins-test\\target2)
         "${test_builtins[@]}" --target-dir "$verb_path" --features c
     fi
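
The check above is easy to trip from Rust code that builds artifact paths by hand. A minimal sketch (not part of the test suite; the directory name is invented) of why `Path::join` matters for verbatim paths:

```rust
use std::path::{Path, PathBuf};

fn main() {
    // A verbatim target directory, as produced above with `cmd.exe //C echo \\?\%cd%\...`.
    let target_dir = Path::new(r"\\?\C:\work\builtins-test\target2");

    // `Path::join` keeps the native `\` separator, so the verbatim prefix stays usable.
    let ok: PathBuf = target_dir.join("release").join("deps");

    // Formatting with `/` yields a mixed-separator path; verbatim paths bypass Win32
    // normalization, so the `/` is never rewritten and lookups can fail.
    let broken = format!("{}/release/deps", target_dir.display());

    println!("{}\n{}", ok.display(), broken);
}
```
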
@@ -161,7 +164,7 @@ else
     mflags+=(--workspace --target "$target")
     cmd=(cargo test "${mflags[@]}")
     profile_flag="--profile"
-    
+
     # If nextest is available, use that
     command -v cargo-nextest && nextest=1 || nextest=0
     if [ "$nextest" = "1" ]; then
@@ -204,7 +207,7 @@ else
     "${cmd[@]}" "$profile_flag" release-checked --features unstable-intrinsics --benches
 
     # Ensure that the routines do not panic.
-    # 
+    #
     # `--tests` must be passed because no-panic is only enabled as a dev
     # dependency. The `release-opt` profile must be used to enable LTO and a
     # single CGU.
diff --git a/library/compiler-builtins/ci/update-musl.sh b/library/compiler-builtins/ci/update-musl.sh
index b71cf577830..637ab139485 100755
--- a/library/compiler-builtins/ci/update-musl.sh
+++ b/library/compiler-builtins/ci/update-musl.sh
@@ -3,7 +3,7 @@
 
 set -eux
 
-url=git://git.musl-libc.org/musl
+url=https://github.com/kraj/musl.git
 ref=c47ad25ea3b484e10326f933e927c0bc8cded3da
 dst=crates/musl-math-sys/musl
 
diff --git a/library/compiler-builtins/compiler-builtins/Cargo.toml b/library/compiler-builtins/compiler-builtins/Cargo.toml
index 3ccb05f73fb..8bbe136ce33 100644
--- a/library/compiler-builtins/compiler-builtins/Cargo.toml
+++ b/library/compiler-builtins/compiler-builtins/Cargo.toml
@@ -35,8 +35,9 @@ default = ["compiler-builtins"]
 # implementations and also filling in unimplemented intrinsics
 c = ["dep:cc"]
 
-# Workaround for the Cranelift codegen backend. Disables any implementations
-# which use inline assembly and fall back to pure Rust versions (if available).
+# For implementations where there is both a generic version and a platform-
+# specific version, use the generic version. This is meant to enable testing
+# the generic versions on all platforms.
 no-asm = []
 
 # Workaround for codegen backends which haven't yet implemented `f16` and
diff --git a/library/compiler-builtins/compiler-builtins/build.rs b/library/compiler-builtins/compiler-builtins/build.rs
index 8f51c12b535..43b978606e5 100644
--- a/library/compiler-builtins/compiler-builtins/build.rs
+++ b/library/compiler-builtins/compiler-builtins/build.rs
@@ -106,13 +106,6 @@ fn configure_libm(target: &Target) {
         println!("cargo:rustc-cfg=optimizations_enabled");
     }
 
-    // Config shorthands
-    println!("cargo:rustc-check-cfg=cfg(x86_no_sse)");
-    if target.arch == "x86" && !target.features.iter().any(|f| f == "sse") {
-        // Shorthand to detect i586 targets
-        println!("cargo:rustc-cfg=x86_no_sse");
-    }
-
     println!(
         "cargo:rustc-env=CFG_CARGO_FEATURES={:?}",
         target.cargo_features
diff --git a/library/compiler-builtins/compiler-builtins/configure.rs b/library/compiler-builtins/compiler-builtins/configure.rs
index 9721ddf090c..79e238abc0f 100644
--- a/library/compiler-builtins/compiler-builtins/configure.rs
+++ b/library/compiler-builtins/compiler-builtins/configure.rs
@@ -1,6 +1,5 @@
 // Configuration that is shared between `compiler_builtins` and `builtins_test`.
 
-use std::process::{Command, Stdio};
 use std::{env, str};
 
 #[derive(Debug)]
@@ -35,26 +34,6 @@ impl Target {
             .map(|s| s.to_lowercase().replace("_", "-"))
             .collect();
 
-        // Query rustc for options that Cargo does not provide env for. The bootstrap hack is used
-        // to get consistent output regardless of channel (`f16`/`f128` config options are hidden
-        // on stable otherwise).
-        let mut cmd = Command::new(env::var("RUSTC").unwrap());
-        cmd.args(["--print=cfg", "--target", &triple])
-            .env("RUSTC_BOOTSTRAP", "1")
-            .stderr(Stdio::inherit());
-        let out = cmd
-            .output()
-            .unwrap_or_else(|e| panic!("failed to run `{cmd:?}`: {e}"));
-        let rustc_cfg = str::from_utf8(&out.stdout).unwrap();
-
-        // If we couldn't query `rustc` (e.g. a custom JSON target was used), make the safe
-        // choice and leave `f16` and `f128` disabled.
-        let rustc_output_ok = out.status.success();
-        let reliable_f128 =
-            rustc_output_ok && rustc_cfg.lines().any(|l| l == "target_has_reliable_f128");
-        let reliable_f16 =
-            rustc_output_ok && rustc_cfg.lines().any(|l| l == "target_has_reliable_f16");
-
         Self {
             triple,
             triple_split,
@@ -74,8 +53,10 @@ impl Target {
                 .split(",")
                 .map(ToOwned::to_owned)
                 .collect(),
-            reliable_f128,
-            reliable_f16,
+            // Note that these are unstable options, so they only show up with the nightly compiler
+            // or with `RUSTC_BOOTSTRAP=1` (which is required to use the types anyway).
+            reliable_f128: env::var_os("CARGO_CFG_TARGET_HAS_RELIABLE_F128").is_some(),
+            reliable_f16: env::var_os("CARGO_CFG_TARGET_HAS_RELIABLE_F16").is_some(),
         }
     }
 
@@ -100,6 +81,13 @@ pub fn configure_aliases(target: &Target) {
         println!("cargo:rustc-cfg=thumb_1")
     }
 
+    // Config shorthands
+    println!("cargo:rustc-check-cfg=cfg(x86_no_sse)");
+    if target.arch == "x86" && !target.features.iter().any(|f| f == "sse") {
+        // Shorthand to detect i586 targets
+        println!("cargo:rustc-cfg=x86_no_sse");
+    }
+
     /* Not all backends support `f16` and `f128` to the same level on all architectures, so we
      * need to disable things if the compiler may crash. See configuration at:
      * * https://github.com/rust-lang/rust/blob/c65dccabacdfd6c8a7f7439eba13422fdd89b91e/compiler/rustc_codegen_llvm/src/llvm_util.rs#L367-L432
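
For reference, a minimal build-script sketch of the pattern the new code relies on: Cargo forwards target cfgs to build scripts as `CARGO_CFG_*` environment variables, so the reliability flags can be read directly instead of spawning `rustc --print=cfg`. The `f16_enabled`/`f128_enabled` names match the cfgs used elsewhere in libm; everything else here is illustrative and assumes a nightly toolchain (or `RUSTC_BOOTSTRAP=1`), since the cfgs are unstable.

```rust
// Illustrative build.rs sketch, not the actual configure.rs logic.
use std::env;

fn main() {
    // Cargo exposes every target cfg as a `CARGO_CFG_<NAME>` env var, so no
    // `rustc` subprocess (and no bootstrap hack) is needed here.
    let reliable_f16 = env::var_os("CARGO_CFG_TARGET_HAS_RELIABLE_F16").is_some();
    let reliable_f128 = env::var_os("CARGO_CFG_TARGET_HAS_RELIABLE_F128").is_some();

    println!("cargo:rustc-check-cfg=cfg(f16_enabled)");
    println!("cargo:rustc-check-cfg=cfg(f128_enabled)");
    if reliable_f16 {
        println!("cargo:rustc-cfg=f16_enabled");
    }
    if reliable_f128 {
        println!("cargo:rustc-cfg=f128_enabled");
    }
}
```
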
diff --git a/library/compiler-builtins/compiler-builtins/src/aarch64.rs b/library/compiler-builtins/compiler-builtins/src/aarch64.rs
index a72b30d29f0..039fab2061c 100644
--- a/library/compiler-builtins/compiler-builtins/src/aarch64.rs
+++ b/library/compiler-builtins/compiler-builtins/src/aarch64.rs
@@ -4,7 +4,7 @@ use core::intrinsics;
 
 intrinsics! {
     #[unsafe(naked)]
-    #[cfg(all(target_os = "uefi", not(feature = "no-asm")))]
+    #[cfg(target_os = "uefi")]
     pub unsafe extern "custom" fn __chkstk() {
         core::arch::naked_asm!(
             ".p2align 2",
diff --git a/library/compiler-builtins/compiler-builtins/src/aarch64_linux.rs b/library/compiler-builtins/compiler-builtins/src/aarch64_linux.rs
index 38fcab152ae..01d7fb47329 100644
--- a/library/compiler-builtins/compiler-builtins/src/aarch64_linux.rs
+++ b/library/compiler-builtins/compiler-builtins/src/aarch64_linux.rs
@@ -6,9 +6,6 @@
 //! which is supported on the current CPU.
 //! See <https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10#:~:text=out%20of%20line%20atomics> for more discussion.
 //!
-//! Currently we only support LL/SC, because LSE requires `getauxval` from libc in order to do runtime detection.
-//! Use the `compiler-rt` intrinsics if you want LSE support.
-//!
 //! Ported from `aarch64/lse.S` in LLVM's compiler-rt.
 //!
 //! Generate functions for each of the following symbols:
@@ -24,7 +21,18 @@
 //! We do something similar, but with macro arguments.
 #![cfg_attr(feature = "c", allow(unused_macros))] // avoid putting the macros into a submodule
 
-// We don't do runtime dispatch so we don't have to worry about the `__aarch64_have_lse_atomics` global ctor.
+use core::sync::atomic::{AtomicU8, Ordering};
+
+/// Non-zero if the host supports LSE atomics.
+static HAVE_LSE_ATOMICS: AtomicU8 = AtomicU8::new(0);
+
+intrinsics! {
+    /// Call this to enable LSE in the outline atomic operations. The caller must verify
+    /// that LSE operations are supported before calling.
+    pub extern "C" fn __rust_enable_lse() {
+        HAVE_LSE_ATOMICS.store(1, Ordering::Relaxed);
+    }
+}
 
 /// Translate a byte size to a Rust type.
 #[rustfmt::skip]
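
A hedged sketch of how a caller could satisfy the contract of `__rust_enable_lse` on aarch64 Linux: check the HWCAP bit for FEAT_LSE before flipping the flag. The `libc` calls and the locally defined `HWCAP_ATOMICS` constant are assumptions for illustration and are not part of this patch.

```rust
// Hypothetical runtime-startup hook, not part of compiler-builtins.
unsafe extern "C" {
    fn __rust_enable_lse();
}

// Bit 8 of AT_HWCAP advertises FEAT_LSE on aarch64 Linux.
const HWCAP_ATOMICS: libc::c_ulong = 1 << 8;

fn enable_lse_if_supported() {
    let hwcap = unsafe { libc::getauxval(libc::AT_HWCAP) };
    if hwcap & HWCAP_ATOMICS != 0 {
        // SAFETY: we just verified that the CPU supports LSE atomics.
        unsafe { __rust_enable_lse() };
    }
}
```
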
@@ -45,6 +53,7 @@ macro_rules! reg {
     (2, $num:literal) => { concat!("w", $num) };
     (4, $num:literal) => { concat!("w", $num) };
     (8, $num:literal) => { concat!("x", $num) };
+    (16, $num:literal) => { concat!("x", $num) };
 }
 
 /// Given an atomic ordering, translate it to the acquire suffix for the ldxr aarch64 ASM instruction.
@@ -126,6 +135,41 @@ macro_rules! stxp {
     };
 }
 
+// If LSE is supported, perform the requested LSE op and return; otherwise fall through.
+macro_rules! try_lse_op {
+    ($op: literal, $ordering:ident, $bytes:tt, $($reg:literal,)* [ $mem:ident ] ) => {
+        concat!(
+            ".arch_extension lse; ",
+            "adrp    x16, {have_lse}; ",
+            "ldrb    w16, [x16, :lo12:{have_lse}]; ",
+            "cbz     w16, 8f; ",
+            // LSE_OP  s(reg),* [$mem]
+            concat!(lse!($op, $ordering, $bytes), $( " ", reg!($bytes, $reg), ", " ,)* "[", stringify!($mem), "]; ",),
+            "ret; ",
+            "8:"
+        )
+    };
+}
+
+// Translate memory ordering to the LSE suffix
+#[rustfmt::skip]
+macro_rules! lse_mem_sfx {
+    (Relaxed) => { "" };
+    (Acquire) => { "a" };
+    (Release) => { "l" };
+    (AcqRel) => { "al" };
+}
+
+// Generate the aarch64 LSE operation for memory ordering and width
+macro_rules! lse {
+    ($op:literal, $order:ident, 16) => {
+        concat!($op, "p", lse_mem_sfx!($order))
+    };
+    ($op:literal, $order:ident, $bytes:tt) => {
+        concat!($op, lse_mem_sfx!($order), size!($bytes))
+    };
+}
+
 /// See <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.compare_and_swap>.
 macro_rules! compare_and_swap {
     ($ordering:ident, $bytes:tt, $name:ident) => {
@@ -137,7 +181,9 @@ macro_rules! compare_and_swap {
             ) -> int_ty!($bytes) {
                 // We can't use `AtomicI8::compare_and_swap`; we *are* compare_and_swap.
                 core::arch::naked_asm! {
-                    // UXT s(tmp0), s(0)
+                    // CAS    s(0), s(1), [x2]; if LSE supported.
+                    try_lse_op!("cas", $ordering, $bytes, 0, 1, [x2]),
+                    // UXT    s(tmp0), s(0)
                     concat!(uxt!($bytes), " ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
                     "0:",
                     // LDXR   s(0), [x2]
@@ -150,6 +196,7 @@ macro_rules! compare_and_swap {
                     "cbnz   w17, 0b",
                     "1:",
                     "ret",
+                    have_lse = sym crate::aarch64_linux::HAVE_LSE_ATOMICS,
                 }
             }
         }
@@ -166,6 +213,8 @@ macro_rules! compare_and_swap_i128 {
                 expected: i128, desired: i128, ptr: *mut i128
             ) -> i128 {
                 core::arch::naked_asm! {
+                    // CASP   x0, x1, x2, x3, [x4]; if LSE supported.
+                    try_lse_op!("cas", $ordering, 16, 0, 1, 2, 3, [x4]),
                     "mov    x16, x0",
                     "mov    x17, x1",
                     "0:",
@@ -179,6 +228,7 @@ macro_rules! compare_and_swap_i128 {
                     "cbnz   w15, 0b",
                     "1:",
                     "ret",
+                    have_lse = sym crate::aarch64_linux::HAVE_LSE_ATOMICS,
                 }
             }
         }
@@ -195,6 +245,8 @@ macro_rules! swap {
                 left: int_ty!($bytes), right_ptr: *mut int_ty!($bytes)
             ) -> int_ty!($bytes) {
                 core::arch::naked_asm! {
+                    // SWP    s(0), s(0), [x1]; if LSE supported.
+                    try_lse_op!("swp", $ordering, $bytes, 0, 0, [x1]),
                     // mov    s(tmp0), s(0)
                     concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
                     "0:",
@@ -204,6 +256,7 @@ macro_rules! swap {
                     concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 16), ", [x1]"),
                     "cbnz   w17, 0b",
                     "ret",
+                    have_lse = sym crate::aarch64_linux::HAVE_LSE_ATOMICS,
                 }
             }
         }
@@ -212,7 +265,7 @@ macro_rules! swap {
 
 /// See (e.g.) <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.fetch_add>.
 macro_rules! fetch_op {
-    ($ordering:ident, $bytes:tt, $name:ident, $op:literal) => {
+    ($ordering:ident, $bytes:tt, $name:ident, $op:literal, $lse_op:literal) => {
         intrinsics! {
             #[maybe_use_optimized_c_shim]
             #[unsafe(naked)]
@@ -220,6 +273,8 @@ macro_rules! fetch_op {
                 val: int_ty!($bytes), ptr: *mut int_ty!($bytes)
             ) -> int_ty!($bytes) {
                 core::arch::naked_asm! {
+                    // LSEOP  s(0), s(0), [x1]; if LSE supported.
+                    try_lse_op!($lse_op, $ordering, $bytes, 0, 0, [x1]),
                     // mov    s(tmp0), s(0)
                     concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
                     "0:",
@@ -231,6 +286,7 @@ macro_rules! fetch_op {
                     concat!(stxr!($ordering, $bytes), " w15, ", reg!($bytes, 17), ", [x1]"),
                     "cbnz  w15, 0b",
                     "ret",
+                    have_lse = sym crate::aarch64_linux::HAVE_LSE_ATOMICS,
                 }
             }
         }
@@ -240,25 +296,25 @@ macro_rules! fetch_op {
 // We need a single macro to pass to `foreach_ldadd`.
 macro_rules! add {
     ($ordering:ident, $bytes:tt, $name:ident) => {
-        fetch_op! { $ordering, $bytes, $name, "add" }
+        fetch_op! { $ordering, $bytes, $name, "add", "ldadd" }
     };
 }
 
 macro_rules! and {
     ($ordering:ident, $bytes:tt, $name:ident) => {
-        fetch_op! { $ordering, $bytes, $name, "bic" }
+        fetch_op! { $ordering, $bytes, $name, "bic", "ldclr" }
     };
 }
 
 macro_rules! xor {
     ($ordering:ident, $bytes:tt, $name:ident) => {
-        fetch_op! { $ordering, $bytes, $name, "eor" }
+        fetch_op! { $ordering, $bytes, $name, "eor", "ldeor" }
     };
 }
 
 macro_rules! or {
     ($ordering:ident, $bytes:tt, $name:ident) => {
-        fetch_op! { $ordering, $bytes, $name, "orr" }
+        fetch_op! { $ordering, $bytes, $name, "orr", "ldset" }
     };
 }
 
diff --git a/library/compiler-builtins/compiler-builtins/src/arm.rs b/library/compiler-builtins/compiler-builtins/src/arm.rs
index fbec93ca431..0c15b37df1d 100644
--- a/library/compiler-builtins/compiler-builtins/src/arm.rs
+++ b/library/compiler-builtins/compiler-builtins/src/arm.rs
@@ -1,5 +1,3 @@
-#![cfg(not(feature = "no-asm"))]
-
 // Interfaces used by naked trampolines.
 // SAFETY: these are defined in compiler-builtins
 unsafe extern "C" {
diff --git a/library/compiler-builtins/compiler-builtins/src/hexagon.rs b/library/compiler-builtins/compiler-builtins/src/hexagon.rs
index 91cf91c3142..a5c7b4dfdda 100644
--- a/library/compiler-builtins/compiler-builtins/src/hexagon.rs
+++ b/library/compiler-builtins/compiler-builtins/src/hexagon.rs
@@ -1,5 +1,3 @@
-#![cfg(not(feature = "no-asm"))]
-
 use core::arch::global_asm;
 
 global_asm!(include_str!("hexagon/func_macro.s"), options(raw));
diff --git a/library/compiler-builtins/compiler-builtins/src/lib.rs b/library/compiler-builtins/compiler-builtins/src/lib.rs
index fe0ad81dd3a..ca75f44e02a 100644
--- a/library/compiler-builtins/compiler-builtins/src/lib.rs
+++ b/library/compiler-builtins/compiler-builtins/src/lib.rs
@@ -60,7 +60,7 @@ pub mod arm;
 #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))]
 pub mod aarch64;
 
-#[cfg(all(target_arch = "aarch64", target_os = "linux", not(feature = "no-asm"),))]
+#[cfg(all(target_arch = "aarch64", target_os = "linux"))]
 pub mod aarch64_linux;
 
 #[cfg(all(
diff --git a/library/compiler-builtins/compiler-builtins/src/probestack.rs b/library/compiler-builtins/compiler-builtins/src/probestack.rs
index f4105dde57e..9a18216da99 100644
--- a/library/compiler-builtins/compiler-builtins/src/probestack.rs
+++ b/library/compiler-builtins/compiler-builtins/src/probestack.rs
@@ -44,8 +44,6 @@
 #![cfg(not(feature = "mangled-names"))]
 // Windows and Cygwin already has builtins to do this.
 #![cfg(not(any(windows, target_os = "cygwin")))]
-// All these builtins require assembly
-#![cfg(not(feature = "no-asm"))]
 // We only define stack probing for these architectures today.
 #![cfg(any(target_arch = "x86_64", target_arch = "x86"))]
 
diff --git a/library/compiler-builtins/compiler-builtins/src/x86.rs b/library/compiler-builtins/compiler-builtins/src/x86.rs
index 16e50922a94..51940b3b338 100644
--- a/library/compiler-builtins/compiler-builtins/src/x86.rs
+++ b/library/compiler-builtins/compiler-builtins/src/x86.rs
@@ -9,10 +9,7 @@ use core::intrinsics;
 
 intrinsics! {
     #[unsafe(naked)]
-    #[cfg(all(
-        any(all(windows, target_env = "gnu"), target_os = "uefi"),
-        not(feature = "no-asm")
-    ))]
+    #[cfg(any(all(windows, target_env = "gnu"), target_os = "uefi"))]
     pub unsafe extern "custom" fn __chkstk() {
         core::arch::naked_asm!(
             "jmp {}", // Jump to __alloca since fallthrough may be unreliable"
@@ -21,10 +18,7 @@ intrinsics! {
     }
 
     #[unsafe(naked)]
-    #[cfg(all(
-        any(all(windows, target_env = "gnu"), target_os = "uefi"),
-        not(feature = "no-asm")
-    ))]
+    #[cfg(any(all(windows, target_env = "gnu"), target_os = "uefi"))]
     pub unsafe extern "custom" fn _alloca() {
         // __chkstk and _alloca are the same function
         core::arch::naked_asm!(
diff --git a/library/compiler-builtins/compiler-builtins/src/x86_64.rs b/library/compiler-builtins/compiler-builtins/src/x86_64.rs
index 9b7133b482e..f9ae784d575 100644
--- a/library/compiler-builtins/compiler-builtins/src/x86_64.rs
+++ b/library/compiler-builtins/compiler-builtins/src/x86_64.rs
@@ -9,14 +9,7 @@ use core::intrinsics;
 
 intrinsics! {
     #[unsafe(naked)]
-    #[cfg(all(
-        any(
-            all(windows, target_env = "gnu"),
-            target_os = "cygwin",
-            target_os = "uefi"
-        ),
-        not(feature = "no-asm")
-    ))]
+    #[cfg(any(all(windows, target_env = "gnu"), target_os = "cygwin", target_os = "uefi"))]
     pub unsafe extern "custom" fn ___chkstk_ms() {
         core::arch::naked_asm!(
             "push   %rcx",
diff --git a/library/compiler-builtins/crates/musl-math-sys/src/lib.rs b/library/compiler-builtins/crates/musl-math-sys/src/lib.rs
index 6a4bf4859d9..9cab8deefde 100644
--- a/library/compiler-builtins/crates/musl-math-sys/src/lib.rs
+++ b/library/compiler-builtins/crates/musl-math-sys/src/lib.rs
@@ -40,8 +40,6 @@ macro_rules! functions {
     ) => {
         // Run a simple check to ensure we can link and call the function without crashing.
         #[test]
-        // FIXME(#309): LE PPC crashes calling some musl functions
-        #[cfg_attr(all(target_arch = "powerpc64", target_endian = "little"), ignore)]
         fn $name() {
             <fn($($aty),+) -> $rty>::check(super::$name);
         }
diff --git a/library/compiler-builtins/crates/symbol-check/Cargo.toml b/library/compiler-builtins/crates/symbol-check/Cargo.toml
index 30969ee406a..e2218b49172 100644
--- a/library/compiler-builtins/crates/symbol-check/Cargo.toml
+++ b/library/compiler-builtins/crates/symbol-check/Cargo.toml
@@ -5,8 +5,7 @@ edition = "2024"
 publish = false
 
 [dependencies]
-# FIXME: used as a git dependency since the latest release does not support wasm
-object = { git = "https://github.com/gimli-rs/object.git", rev = "013fac75da56a684377af4151b8164b78c1790e0" }
+object = "0.37.1"
 serde_json = "1.0.140"
 
 [features]
diff --git a/library/compiler-builtins/libm-test/src/precision.rs b/library/compiler-builtins/libm-test/src/precision.rs
index 32825b15d47..3fb8c1b3710 100644
--- a/library/compiler-builtins/libm-test/src/precision.rs
+++ b/library/compiler-builtins/libm-test/src/precision.rs
@@ -272,18 +272,6 @@ impl MaybeOverride<(f32,)> for SpecialCase {
 impl MaybeOverride<(f64,)> for SpecialCase {
     fn check_float<F: Float>(input: (f64,), actual: F, expected: F, ctx: &CheckCtx) -> CheckAction {
         if cfg!(x86_no_sse)
-            && ctx.base_name == BaseName::Ceil
-            && ctx.basis == CheckBasis::Musl
-            && input.0 < 0.0
-            && input.0 > -1.0
-            && expected == F::ZERO
-            && actual == F::ZERO
-        {
-            // musl returns -0.0, we return +0.0
-            return XFAIL("i586 ceil signed zero");
-        }
-
-        if cfg!(x86_no_sse)
             && (ctx.base_name == BaseName::Rint || ctx.base_name == BaseName::Roundeven)
             && (expected - actual).abs() <= F::ONE
             && (expected - actual).abs() > F::ZERO
@@ -293,16 +281,6 @@ impl MaybeOverride<(f64,)> for SpecialCase {
         }
 
         if cfg!(x86_no_sse)
-            && (ctx.fn_ident == Identifier::Ceil || ctx.fn_ident == Identifier::Floor)
-            && expected.eq_repr(F::NEG_ZERO)
-            && actual.eq_repr(F::ZERO)
-        {
-            // FIXME: the x87 implementations do not keep the distinction between -0.0 and 0.0.
-            // See https://github.com/rust-lang/libm/pull/404#issuecomment-2572399955
-            return XFAIL("i586 ceil/floor signed zero");
-        }
-
-        if cfg!(x86_no_sse)
             && (ctx.fn_ident == Identifier::Exp10 || ctx.fn_ident == Identifier::Exp2)
         {
             // FIXME: i586 has very imprecise results with ULP > u32::MAX for these
diff --git a/library/compiler-builtins/libm/configure.rs b/library/compiler-builtins/libm/configure.rs
index f9100d2d58b..76186e63652 100644
--- a/library/compiler-builtins/libm/configure.rs
+++ b/library/compiler-builtins/libm/configure.rs
@@ -1,9 +1,9 @@
 // Configuration shared with both libm and libm-test
 
+use std::env;
 use std::path::PathBuf;
-use std::process::{Command, Stdio};
-use std::{env, str};
 
+#[derive(Debug)]
 #[allow(dead_code)]
 pub struct Config {
     pub manifest_dir: PathBuf,
@@ -33,26 +33,6 @@ impl Config {
             .map(|s| s.to_lowercase().replace("_", "-"))
             .collect();
 
-        // Query rustc for options that Cargo does not provide env for. The bootstrap hack is used
-        // to get consistent output regardless of channel (`f16`/`f128` config options are hidden
-        // on stable otherwise).
-        let mut cmd = Command::new(env::var("RUSTC").unwrap());
-        cmd.args(["--print=cfg", "--target", &target_triple])
-            .env("RUSTC_BOOTSTRAP", "1")
-            .stderr(Stdio::inherit());
-        let out = cmd
-            .output()
-            .unwrap_or_else(|e| panic!("failed to run `{cmd:?}`: {e}"));
-        let rustc_cfg = str::from_utf8(&out.stdout).unwrap();
-
-        // If we couldn't query `rustc` (e.g. a custom JSON target was used), make the safe
-        // choice and leave `f16` and `f128` disabled.
-        let rustc_output_ok = out.status.success();
-        let reliable_f128 =
-            rustc_output_ok && rustc_cfg.lines().any(|l| l == "target_has_reliable_f128");
-        let reliable_f16 =
-            rustc_output_ok && rustc_cfg.lines().any(|l| l == "target_has_reliable_f16");
-
         Self {
             target_triple,
             manifest_dir: PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()),
@@ -66,8 +46,10 @@ impl Config {
             target_string: env::var("TARGET").unwrap(),
             target_vendor: env::var("CARGO_CFG_TARGET_VENDOR").unwrap(),
             target_features,
-            reliable_f128,
-            reliable_f16,
+            // Note that these are unstable options, so they only show up with the nightly compiler
+            // or with `RUSTC_BOOTSTRAP=1` (which is required to use the types anyway).
+            reliable_f128: env::var_os("CARGO_CFG_TARGET_HAS_RELIABLE_F128").is_some(),
+            reliable_f16: env::var_os("CARGO_CFG_TARGET_HAS_RELIABLE_F16").is_some(),
         }
     }
 }
diff --git a/library/compiler-builtins/libm/src/math/acos.rs b/library/compiler-builtins/libm/src/math/acos.rs
index 23b13251ee2..89b2e7c5f30 100644
--- a/library/compiler-builtins/libm/src/math/acos.rs
+++ b/library/compiler-builtins/libm/src/math/acos.rs
@@ -59,7 +59,7 @@ fn r(z: f64) -> f64 {
 /// Computes the inverse cosine (arc cosine) of the input value.
 /// Arguments must be in the range -1 to 1.
 /// Returns values in radians, in the range of 0 to pi.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn acos(x: f64) -> f64 {
     let x1p_120f = f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ -120
     let z: f64;
diff --git a/library/compiler-builtins/libm/src/math/acosf.rs b/library/compiler-builtins/libm/src/math/acosf.rs
index dd88eea5b13..d263b3f2ce3 100644
--- a/library/compiler-builtins/libm/src/math/acosf.rs
+++ b/library/compiler-builtins/libm/src/math/acosf.rs
@@ -33,7 +33,7 @@ fn r(z: f32) -> f32 {
 /// Computes the inverse cosine (arc cosine) of the input value.
 /// Arguments must be in the range -1 to 1.
 /// Returns values in radians, in the range of 0 to pi.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn acosf(x: f32) -> f32 {
     let x1p_120 = f32::from_bits(0x03800000); // 0x1p-120 === 2 ^ (-120)
 
diff --git a/library/compiler-builtins/libm/src/math/acosh.rs b/library/compiler-builtins/libm/src/math/acosh.rs
index d1f5b9fa937..8737bad012c 100644
--- a/library/compiler-builtins/libm/src/math/acosh.rs
+++ b/library/compiler-builtins/libm/src/math/acosh.rs
@@ -7,7 +7,7 @@ const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42,  0xfefa3
 /// Calculates the inverse hyperbolic cosine of `x`.
 /// Is defined as `log(x + sqrt(x*x-1))`.
 /// `x` must be a number greater than or equal to 1.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn acosh(x: f64) -> f64 {
     let u = x.to_bits();
     let e = ((u >> 52) as usize) & 0x7ff;
diff --git a/library/compiler-builtins/libm/src/math/acoshf.rs b/library/compiler-builtins/libm/src/math/acoshf.rs
index ad3455fdd48..432fa03f116 100644
--- a/library/compiler-builtins/libm/src/math/acoshf.rs
+++ b/library/compiler-builtins/libm/src/math/acoshf.rs
@@ -7,7 +7,7 @@ const LN2: f32 = 0.693147180559945309417232121458176568;
 /// Calculates the inverse hyperbolic cosine of `x`.
 /// Is defined as `log(x + sqrt(x*x-1))`.
 /// `x` must be a number greater than or equal to 1.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn acoshf(x: f32) -> f32 {
     let u = x.to_bits();
     let a = u & 0x7fffffff;
diff --git a/library/compiler-builtins/libm/src/math/arch/i586.rs b/library/compiler-builtins/libm/src/math/arch/i586.rs
index f92b9a2af71..b9a66762063 100644
--- a/library/compiler-builtins/libm/src/math/arch/i586.rs
+++ b/library/compiler-builtins/libm/src/math/arch/i586.rs
@@ -1,37 +1,62 @@
 //! Architecture-specific support for x86-32 without SSE2
+//!
+//! We use an alternative implementation on x86 because the
+//! main implementation fails with the x87 FPU used by
+//! Debian i386, probably due to excess precision issues.
+//!
+//! See <https://github.com/rust-lang/compiler-builtins/pull/976> for discussion on why these
+//! functions are implemented in this way.
 
-use super::super::fabs;
-
-/// Use an alternative implementation on x86, because the
-/// main implementation fails with the x87 FPU used by
-/// debian i386, probably due to excess precision issues.
-/// Basic implementation taken from https://github.com/rust-lang/libm/issues/219.
-pub fn ceil(x: f64) -> f64 {
-    if fabs(x).to_bits() < 4503599627370496.0_f64.to_bits() {
-        let truncated = x as i64 as f64;
-        if truncated < x {
-            return truncated + 1.0;
-        } else {
-            return truncated;
-        }
-    } else {
-        return x;
+pub fn ceil(mut x: f64) -> f64 {
+    unsafe {
+        core::arch::asm!(
+            "fld qword ptr [{x}]",
+            // Save the FPU control word, using `x` as scratch space.
+            "fstcw [{x}]",
+            // Set rounding control to 0b10 (+∞).
+            "mov word ptr [{x} + 2], 0x0b7f",
+            "fldcw [{x} + 2]",
+            // Round.
+            "frndint",
+            // Restore FPU control word.
+            "fldcw [{x}]",
+            // Save rounded value to memory.
+            "fstp qword ptr [{x}]",
+            x = in(reg) &mut x,
+            // All of the x87 FPU stack is used, so all registers must be clobbered
+            out("st(0)") _, out("st(1)") _,
+            out("st(2)") _, out("st(3)") _,
+            out("st(4)") _, out("st(5)") _,
+            out("st(6)") _, out("st(7)") _,
+            options(nostack),
+        );
     }
+    x
 }
 
-/// Use an alternative implementation on x86, because the
-/// main implementation fails with the x87 FPU used by
-/// debian i386, probably due to excess precision issues.
-/// Basic implementation taken from https://github.com/rust-lang/libm/issues/219.
-pub fn floor(x: f64) -> f64 {
-    if fabs(x).to_bits() < 4503599627370496.0_f64.to_bits() {
-        let truncated = x as i64 as f64;
-        if truncated > x {
-            return truncated - 1.0;
-        } else {
-            return truncated;
-        }
-    } else {
-        return x;
+pub fn floor(mut x: f64) -> f64 {
+    unsafe {
+        core::arch::asm!(
+            "fld qword ptr [{x}]",
+            // Save the FPU control word, using `x` as scratch space.
+            "fstcw [{x}]",
+            // Set rounding control to 0b01 (-∞).
+            "mov word ptr [{x} + 2], 0x077f",
+            "fldcw [{x} + 2]",
+            // Round.
+            "frndint",
+            // Restore FPU control word.
+            "fldcw [{x}]",
+            // Save rounded value to memory.
+            "fstp qword ptr [{x}]",
+            x = in(reg) &mut x,
+            // All of the x87 FPU stack is used, so all registers must be clobbered
+            out("st(0)") _, out("st(1)") _,
+            out("st(2)") _, out("st(3)") _,
+            out("st(4)") _, out("st(5)") _,
+            out("st(6)") _, out("st(7)") _,
+            options(nostack),
+        );
     }
+    x
 }
diff --git a/library/compiler-builtins/libm/src/math/asin.rs b/library/compiler-builtins/libm/src/math/asin.rs
index 12d0cd35fa5..9554a3eacc2 100644
--- a/library/compiler-builtins/libm/src/math/asin.rs
+++ b/library/compiler-builtins/libm/src/math/asin.rs
@@ -66,7 +66,7 @@ fn comp_r(z: f64) -> f64 {
 /// Computes the inverse sine (arc sine) of the argument `x`.
 /// Arguments to asin must be in the range -1 to 1.
 /// Returns values in radians, in the range of -pi/2 to pi/2.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn asin(mut x: f64) -> f64 {
     let z: f64;
     let r: f64;
diff --git a/library/compiler-builtins/libm/src/math/asinf.rs b/library/compiler-builtins/libm/src/math/asinf.rs
index ed685556730..2dfe2a6d486 100644
--- a/library/compiler-builtins/libm/src/math/asinf.rs
+++ b/library/compiler-builtins/libm/src/math/asinf.rs
@@ -35,7 +35,7 @@ fn r(z: f32) -> f32 {
 /// Computes the inverse sine (arc sine) of the argument `x`.
 /// Arguments to asin must be in the range -1 to 1.
 /// Returns values in radians, in the range of -pi/2 to pi/2.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn asinf(mut x: f32) -> f32 {
     let x1p_120 = f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ (-120)
 
diff --git a/library/compiler-builtins/libm/src/math/asinh.rs b/library/compiler-builtins/libm/src/math/asinh.rs
index 75d3c3ad462..d63bc0aa9c3 100644
--- a/library/compiler-builtins/libm/src/math/asinh.rs
+++ b/library/compiler-builtins/libm/src/math/asinh.rs
@@ -7,7 +7,7 @@ const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42,  0xfefa3
 ///
 /// Calculates the inverse hyperbolic sine of `x`.
 /// Is defined as `sgn(x)*log(|x|+sqrt(x*x+1))`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn asinh(mut x: f64) -> f64 {
     let mut u = x.to_bits();
     let e = ((u >> 52) as usize) & 0x7ff;
diff --git a/library/compiler-builtins/libm/src/math/asinhf.rs b/library/compiler-builtins/libm/src/math/asinhf.rs
index 27ed9dd372d..3ca2d44894d 100644
--- a/library/compiler-builtins/libm/src/math/asinhf.rs
+++ b/library/compiler-builtins/libm/src/math/asinhf.rs
@@ -7,7 +7,7 @@ const LN2: f32 = 0.693147180559945309417232121458176568;
 ///
 /// Calculates the inverse hyperbolic sine of `x`.
 /// Is defined as `sgn(x)*log(|x|+sqrt(x*x+1))`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn asinhf(mut x: f32) -> f32 {
     let u = x.to_bits();
     let i = u & 0x7fffffff;
diff --git a/library/compiler-builtins/libm/src/math/atan.rs b/library/compiler-builtins/libm/src/math/atan.rs
index 4ca5cc91a1e..0590ba87cf8 100644
--- a/library/compiler-builtins/libm/src/math/atan.rs
+++ b/library/compiler-builtins/libm/src/math/atan.rs
@@ -65,7 +65,7 @@ const AT: [f64; 11] = [
 ///
 /// Computes the inverse tangent (arc tangent) of the input value.
 /// Returns a value in radians, in the range of -pi/2 to pi/2.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn atan(x: f64) -> f64 {
     let mut x = x;
     let mut ix = (x.to_bits() >> 32) as u32;
diff --git a/library/compiler-builtins/libm/src/math/atan2.rs b/library/compiler-builtins/libm/src/math/atan2.rs
index c668731cf37..51456e409b8 100644
--- a/library/compiler-builtins/libm/src/math/atan2.rs
+++ b/library/compiler-builtins/libm/src/math/atan2.rs
@@ -47,7 +47,7 @@ const PI_LO: f64 = 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
 /// Computes the inverse tangent (arc tangent) of `y/x`.
 /// Produces the correct result even for angles near pi/2 or -pi/2 (that is, when `x` is near 0).
 /// Returns a value in radians, in the range of -pi to pi.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn atan2(y: f64, x: f64) -> f64 {
     if x.is_nan() || y.is_nan() {
         return x + y;
diff --git a/library/compiler-builtins/libm/src/math/atan2f.rs b/library/compiler-builtins/libm/src/math/atan2f.rs
index 95b466fff4e..0f46c9f3906 100644
--- a/library/compiler-builtins/libm/src/math/atan2f.rs
+++ b/library/compiler-builtins/libm/src/math/atan2f.rs
@@ -23,7 +23,7 @@ const PI_LO: f32 = -8.7422776573e-08; /* 0xb3bbbd2e */
 /// Computes the inverse tangent (arc tangent) of `y/x`.
 /// Produces the correct result even for angles near pi/2 or -pi/2 (that is, when `x` is near 0).
 /// Returns a value in radians, in the range of -pi to pi.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn atan2f(y: f32, x: f32) -> f32 {
     if x.is_nan() || y.is_nan() {
         return x + y;
diff --git a/library/compiler-builtins/libm/src/math/atanf.rs b/library/compiler-builtins/libm/src/math/atanf.rs
index da8daa41a01..58568d9a81f 100644
--- a/library/compiler-builtins/libm/src/math/atanf.rs
+++ b/library/compiler-builtins/libm/src/math/atanf.rs
@@ -41,7 +41,7 @@ const A_T: [f32; 5] = [
 ///
 /// Computes the inverse tangent (arc tangent) of the input value.
 /// Returns a value in radians, in the range of -pi/2 to pi/2.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn atanf(mut x: f32) -> f32 {
     let x1p_120 = f32::from_bits(0x03800000); // 0x1p-120 === 2 ^ (-120)
 
diff --git a/library/compiler-builtins/libm/src/math/atanh.rs b/library/compiler-builtins/libm/src/math/atanh.rs
index 9dc826f5605..883ff150fd6 100644
--- a/library/compiler-builtins/libm/src/math/atanh.rs
+++ b/library/compiler-builtins/libm/src/math/atanh.rs
@@ -5,7 +5,7 @@ use super::log1p;
 ///
 /// Calculates the inverse hyperbolic tangent of `x`.
 /// Is defined as `log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn atanh(x: f64) -> f64 {
     let u = x.to_bits();
     let e = ((u >> 52) as usize) & 0x7ff;
diff --git a/library/compiler-builtins/libm/src/math/atanhf.rs b/library/compiler-builtins/libm/src/math/atanhf.rs
index 80ccec1f67f..e4e356d18d8 100644
--- a/library/compiler-builtins/libm/src/math/atanhf.rs
+++ b/library/compiler-builtins/libm/src/math/atanhf.rs
@@ -5,7 +5,7 @@ use super::log1pf;
 ///
 /// Calculates the inverse hyperbolic tangent of `x`.
 /// Is defined as `log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn atanhf(mut x: f32) -> f32 {
     let mut u = x.to_bits();
     let sign = (u >> 31) != 0;
diff --git a/library/compiler-builtins/libm/src/math/cbrt.rs b/library/compiler-builtins/libm/src/math/cbrt.rs
index cf56f7a9792..e905e15f13f 100644
--- a/library/compiler-builtins/libm/src/math/cbrt.rs
+++ b/library/compiler-builtins/libm/src/math/cbrt.rs
@@ -8,7 +8,7 @@ use super::Float;
 use super::support::{FpResult, Round, cold_path};
 
 /// Compute the cube root of the argument.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn cbrt(x: f64) -> f64 {
     cbrt_round(x, Round::Nearest).val
 }
diff --git a/library/compiler-builtins/libm/src/math/cbrtf.rs b/library/compiler-builtins/libm/src/math/cbrtf.rs
index 9d70305c647..9d69584834a 100644
--- a/library/compiler-builtins/libm/src/math/cbrtf.rs
+++ b/library/compiler-builtins/libm/src/math/cbrtf.rs
@@ -25,7 +25,7 @@ const B2: u32 = 642849266; /* B2 = (127-127.0/3-24/3-0.03306235651)*2**23 */
 /// Cube root (f32)
 ///
 /// Computes the cube root of the argument.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn cbrtf(x: f32) -> f32 {
     let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24
 
diff --git a/library/compiler-builtins/libm/src/math/ceil.rs b/library/compiler-builtins/libm/src/math/ceil.rs
index 4e103545727..2cac49f29ba 100644
--- a/library/compiler-builtins/libm/src/math/ceil.rs
+++ b/library/compiler-builtins/libm/src/math/ceil.rs
@@ -2,7 +2,7 @@
 ///
 /// Finds the nearest integer greater than or equal to `x`.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ceilf16(x: f16) -> f16 {
     super::generic::ceil(x)
 }
@@ -10,7 +10,7 @@ pub fn ceilf16(x: f16) -> f16 {
 /// Ceil (f32)
 ///
 /// Finds the nearest integer greater than or equal to `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ceilf(x: f32) -> f32 {
     select_implementation! {
         name: ceilf,
@@ -24,7 +24,7 @@ pub fn ceilf(x: f32) -> f32 {
 /// Ceil (f64)
 ///
 /// Finds the nearest integer greater than or equal to `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ceil(x: f64) -> f64 {
     select_implementation! {
         name: ceil,
@@ -40,7 +40,7 @@ pub fn ceil(x: f64) -> f64 {
 ///
 /// Finds the nearest integer greater than or equal to `x`.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ceilf128(x: f128) -> f128 {
     super::generic::ceil(x)
 }
diff --git a/library/compiler-builtins/libm/src/math/copysign.rs b/library/compiler-builtins/libm/src/math/copysign.rs
index d093d610727..591a87a940e 100644
--- a/library/compiler-builtins/libm/src/math/copysign.rs
+++ b/library/compiler-builtins/libm/src/math/copysign.rs
@@ -3,7 +3,7 @@
 /// Constructs a number with the magnitude (absolute value) of its
 /// first argument, `x`, and the sign of its second argument, `y`.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn copysignf16(x: f16, y: f16) -> f16 {
     super::generic::copysign(x, y)
 }
@@ -12,7 +12,7 @@ pub fn copysignf16(x: f16, y: f16) -> f16 {
 ///
 /// Constructs a number with the magnitude (absolute value) of its
 /// first argument, `x`, and the sign of its second argument, `y`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn copysignf(x: f32, y: f32) -> f32 {
     super::generic::copysign(x, y)
 }
@@ -21,7 +21,7 @@ pub fn copysignf(x: f32, y: f32) -> f32 {
 ///
 /// Constructs a number with the magnitude (absolute value) of its
 /// first argument, `x`, and the sign of its second argument, `y`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn copysign(x: f64, y: f64) -> f64 {
     super::generic::copysign(x, y)
 }
@@ -31,7 +31,7 @@ pub fn copysign(x: f64, y: f64) -> f64 {
 /// Constructs a number with the magnitude (absolute value) of its
 /// first argument, `x`, and the sign of its second argument, `y`.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn copysignf128(x: f128, y: f128) -> f128 {
     super::generic::copysign(x, y)
 }
diff --git a/library/compiler-builtins/libm/src/math/cos.rs b/library/compiler-builtins/libm/src/math/cos.rs
index de99cd4c5e4..b2f786323f4 100644
--- a/library/compiler-builtins/libm/src/math/cos.rs
+++ b/library/compiler-builtins/libm/src/math/cos.rs
@@ -45,7 +45,7 @@ use super::{k_cos, k_sin, rem_pio2};
 /// The cosine of `x` (f64).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn cos(x: f64) -> f64 {
     let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
 
diff --git a/library/compiler-builtins/libm/src/math/cosf.rs b/library/compiler-builtins/libm/src/math/cosf.rs
index 27c2fc3b994..bf5cb9196a3 100644
--- a/library/compiler-builtins/libm/src/math/cosf.rs
+++ b/library/compiler-builtins/libm/src/math/cosf.rs
@@ -27,7 +27,7 @@ const C4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
 /// The cosine of `x` (f32).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn cosf(x: f32) -> f32 {
     let x64 = x as f64;
 
diff --git a/library/compiler-builtins/libm/src/math/cosh.rs b/library/compiler-builtins/libm/src/math/cosh.rs
index d2e43fd6cb6..01081cfc77e 100644
--- a/library/compiler-builtins/libm/src/math/cosh.rs
+++ b/library/compiler-builtins/libm/src/math/cosh.rs
@@ -5,7 +5,7 @@ use super::{exp, expm1, k_expo2};
 /// Computes the hyperbolic cosine of the argument x.
 /// Is defined as `(exp(x) + exp(-x))/2`
 /// Angles are specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn cosh(mut x: f64) -> f64 {
     /* |x| */
     let mut ix = x.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/coshf.rs b/library/compiler-builtins/libm/src/math/coshf.rs
index 567a24410e7..dc039a3117c 100644
--- a/library/compiler-builtins/libm/src/math/coshf.rs
+++ b/library/compiler-builtins/libm/src/math/coshf.rs
@@ -5,7 +5,7 @@ use super::{expf, expm1f, k_expo2f};
 /// Computes the hyperbolic cosine of the argument x.
 /// Is defined as `(exp(x) + exp(-x))/2`
 /// Angles are specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn coshf(mut x: f32) -> f32 {
     let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
 
diff --git a/library/compiler-builtins/libm/src/math/erf.rs b/library/compiler-builtins/libm/src/math/erf.rs
index 5d82228a05f..6c78440afcf 100644
--- a/library/compiler-builtins/libm/src/math/erf.rs
+++ b/library/compiler-builtins/libm/src/math/erf.rs
@@ -219,7 +219,7 @@ fn erfc2(ix: u32, mut x: f64) -> f64 {
 /// Calculates an approximation to the “error function”, which estimates
 /// the probability that an observation will fall within x standard
 /// deviations of the mean (assuming a normal distribution).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn erf(x: f64) -> f64 {
     let r: f64;
     let s: f64;
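
Editor's note: for orientation, the precise link to the normal distribution is that the probability of falling within `x` standard deviations of the mean is `erf(x / sqrt(2))`, not `erf(x)` itself. A small sketch using the `erf` defined above:

    // Probability that a normally distributed observation lies within one
    // standard deviation of the mean: erf(1/sqrt(2)) ≈ 0.6827.
    let p = erf(1.0 / core::f64::consts::SQRT_2);
    assert!((p - 0.682689).abs() < 1e-5);
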
diff --git a/library/compiler-builtins/libm/src/math/erff.rs b/library/compiler-builtins/libm/src/math/erff.rs
index fe15f01082e..2a7680275b9 100644
--- a/library/compiler-builtins/libm/src/math/erff.rs
+++ b/library/compiler-builtins/libm/src/math/erff.rs
@@ -130,7 +130,7 @@ fn erfc2(mut ix: u32, mut x: f32) -> f32 {
 /// Calculates an approximation to the “error function”, which estimates
 /// the probability that an observation will fall within x standard
 /// deviations of the mean (assuming a normal distribution).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn erff(x: f32) -> f32 {
     let r: f32;
     let s: f32;
diff --git a/library/compiler-builtins/libm/src/math/exp.rs b/library/compiler-builtins/libm/src/math/exp.rs
index 782042b62cd..78ce5dd134a 100644
--- a/library/compiler-builtins/libm/src/math/exp.rs
+++ b/library/compiler-builtins/libm/src/math/exp.rs
@@ -81,7 +81,7 @@ const P5: f64 = 4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
 ///
 /// Calculate the exponential of `x`, that is, *e* raised to the power `x`
 /// (where *e* is the base of the natural system of logarithms, approximately 2.71828).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn exp(mut x: f64) -> f64 {
     let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023
     let x1p_149 = f64::from_bits(0x36a0000000000000); // 0x1p-149 === 2 ^ -149
diff --git a/library/compiler-builtins/libm/src/math/exp10.rs b/library/compiler-builtins/libm/src/math/exp10.rs
index 7c33c92b603..1f49f5e9697 100644
--- a/library/compiler-builtins/libm/src/math/exp10.rs
+++ b/library/compiler-builtins/libm/src/math/exp10.rs
@@ -7,7 +7,7 @@ const P10: &[f64] = &[
 ];
 
 /// Calculates 10 raised to the power of `x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn exp10(x: f64) -> f64 {
     let (mut y, n) = modf(x);
     let u: u64 = n.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/exp10f.rs b/library/compiler-builtins/libm/src/math/exp10f.rs
index 303045b3313..22a264211d0 100644
--- a/library/compiler-builtins/libm/src/math/exp10f.rs
+++ b/library/compiler-builtins/libm/src/math/exp10f.rs
@@ -7,7 +7,7 @@ const P10: &[f32] = &[
 ];
 
 /// Calculates 10 raised to the power of `x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn exp10f(x: f32) -> f32 {
     let (mut y, n) = modff(x);
     let u = n.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/exp2.rs b/library/compiler-builtins/libm/src/math/exp2.rs
index 6e98d066cbf..6e4cbc29dcc 100644
--- a/library/compiler-builtins/libm/src/math/exp2.rs
+++ b/library/compiler-builtins/libm/src/math/exp2.rs
@@ -322,7 +322,7 @@ static TBL: [u64; TBLSIZE * 2] = [
 /// Exponential, base 2 (f64)
 ///
 /// Calculate `2^x`, that is, 2 raised to the power `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn exp2(mut x: f64) -> f64 {
     let redux = f64::from_bits(0x4338000000000000) / TBLSIZE as f64;
     let p1 = f64::from_bits(0x3fe62e42fefa39ef);
diff --git a/library/compiler-builtins/libm/src/math/exp2f.rs b/library/compiler-builtins/libm/src/math/exp2f.rs
index f452b6a20f8..733d2f1a847 100644
--- a/library/compiler-builtins/libm/src/math/exp2f.rs
+++ b/library/compiler-builtins/libm/src/math/exp2f.rs
@@ -73,7 +73,7 @@ static EXP2FT: [u64; TBLSIZE] = [
 /// Exponential, base 2 (f32)
 ///
 /// Calculate `2^x`, that is, 2 raised to the power `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn exp2f(mut x: f32) -> f32 {
     let redux = f32::from_bits(0x4b400000) / TBLSIZE as f32;
     let p1 = f32::from_bits(0x3f317218);
diff --git a/library/compiler-builtins/libm/src/math/expf.rs b/library/compiler-builtins/libm/src/math/expf.rs
index 8dc067ab084..dbbfdbba925 100644
--- a/library/compiler-builtins/libm/src/math/expf.rs
+++ b/library/compiler-builtins/libm/src/math/expf.rs
@@ -30,7 +30,7 @@ const P2: f32 = -2.7667332906e-3; /* -0xb55215.0p-32 */
 ///
 /// Calculate the exponential of `x`, that is, *e* raised to the power `x`
 /// (where *e* is the base of the natural system of logarithms, approximately 2.71828).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn expf(mut x: f32) -> f32 {
     let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
     let x1p_126 = f32::from_bits(0x800000); // 0x1p-126f === 2 ^ -126  /*original 0x1p-149f    ??????????? */
diff --git a/library/compiler-builtins/libm/src/math/expm1.rs b/library/compiler-builtins/libm/src/math/expm1.rs
index f25153f32a3..3714bf3afc9 100644
--- a/library/compiler-builtins/libm/src/math/expm1.rs
+++ b/library/compiler-builtins/libm/src/math/expm1.rs
@@ -30,7 +30,7 @@ const Q5: f64 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
 /// system of logarithms, approximately 2.71828).
 /// The result is accurate even for small values of `x`,
 /// where using `exp(x)-1` would lose many significant digits.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn expm1(mut x: f64) -> f64 {
     let hi: f64;
     let lo: f64;
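
Editor's note: the accuracy claim in the doc comment is easy to see for a tiny argument, where `exp(x) - 1.0` cancels away roughly half of the significant digits while `expm1` keeps full precision. Illustrative sketch using the functions from this module:

    let x = 1e-10_f64;
    // exp(x) rounds near 1.0, so the subtraction discards most of the information.
    let naive = exp(x) - 1.0;
    // expm1 returns x + x*x/2 + ... to full double precision.
    let accurate = expm1(x);
    assert!(naive != accurate);
    assert!((accurate - 1.00000000005e-10).abs() < 1e-24);
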
diff --git a/library/compiler-builtins/libm/src/math/expm1f.rs b/library/compiler-builtins/libm/src/math/expm1f.rs
index 63dc86e37c8..f77515a4b99 100644
--- a/library/compiler-builtins/libm/src/math/expm1f.rs
+++ b/library/compiler-builtins/libm/src/math/expm1f.rs
@@ -32,7 +32,7 @@ const Q2: f32 = 1.5807170421e-3; /*  0xcf3010.0p-33 */
 /// system of logarithms, approximately 2.71828).
 /// The result is accurate even for small values of `x`,
 /// where using `exp(x)-1` would lose many significant digits.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn expm1f(mut x: f32) -> f32 {
     let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
 
diff --git a/library/compiler-builtins/libm/src/math/expo2.rs b/library/compiler-builtins/libm/src/math/expo2.rs
index 82e9b360a76..ce90858ec07 100644
--- a/library/compiler-builtins/libm/src/math/expo2.rs
+++ b/library/compiler-builtins/libm/src/math/expo2.rs
@@ -1,7 +1,7 @@
 use super::{combine_words, exp};
 
 /* exp(x)/2 for x >= log(DBL_MAX), slightly better than 0.5*exp(x/2)*exp(x/2) */
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn expo2(x: f64) -> f64 {
     /* k is such that k*ln2 has minimal relative error and x - kln2 > log(DBL_MIN) */
     const K: i32 = 2043;
diff --git a/library/compiler-builtins/libm/src/math/fabs.rs b/library/compiler-builtins/libm/src/math/fabs.rs
index 0050a309fee..7344e21a18b 100644
--- a/library/compiler-builtins/libm/src/math/fabs.rs
+++ b/library/compiler-builtins/libm/src/math/fabs.rs
@@ -3,7 +3,7 @@
 /// Calculates the absolute value (magnitude) of the argument `x`,
 /// by direct manipulation of the bit representation of `x`.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fabsf16(x: f16) -> f16 {
     super::generic::fabs(x)
 }
@@ -12,7 +12,7 @@ pub fn fabsf16(x: f16) -> f16 {
 ///
 /// Calculates the absolute value (magnitude) of the argument `x`,
 /// by direct manipulation of the bit representation of `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fabsf(x: f32) -> f32 {
     select_implementation! {
         name: fabsf,
@@ -27,7 +27,7 @@ pub fn fabsf(x: f32) -> f32 {
 ///
 /// Calculates the absolute value (magnitude) of the argument `x`,
 /// by direct manipulation of the bit representation of `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fabs(x: f64) -> f64 {
     select_implementation! {
         name: fabs,
@@ -43,7 +43,7 @@ pub fn fabs(x: f64) -> f64 {
 /// Calculates the absolute value (magnitude) of the argument `x`,
 /// by direct manipulation of the bit representation of `x`.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fabsf128(x: f128) -> f128 {
     super::generic::fabs(x)
 }
diff --git a/library/compiler-builtins/libm/src/math/fdim.rs b/library/compiler-builtins/libm/src/math/fdim.rs
index 082c5478b2a..dac409e86b1 100644
--- a/library/compiler-builtins/libm/src/math/fdim.rs
+++ b/library/compiler-builtins/libm/src/math/fdim.rs
@@ -7,7 +7,7 @@
 ///
 /// A range error may occur.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fdimf16(x: f16, y: f16) -> f16 {
     super::generic::fdim(x, y)
 }
@@ -20,7 +20,7 @@ pub fn fdimf16(x: f16, y: f16) -> f16 {
 /// * NAN   if either argument is NAN.
 ///
 /// A range error may occur.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fdimf(x: f32, y: f32) -> f32 {
     super::generic::fdim(x, y)
 }
@@ -33,7 +33,7 @@ pub fn fdimf(x: f32, y: f32) -> f32 {
 /// * NAN   if either argument is NAN.
 ///
 /// A range error may occur.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fdim(x: f64, y: f64) -> f64 {
     super::generic::fdim(x, y)
 }
@@ -47,7 +47,7 @@ pub fn fdim(x: f64, y: f64) -> f64 {
 ///
 /// A range error may occur.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fdimf128(x: f128, y: f128) -> f128 {
     super::generic::fdim(x, y)
 }
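
Editor's note: the positive-difference semantics documented above, made concrete (sketch):

    // fdim returns x - y when x > y, and +0.0 otherwise.
    assert_eq!(fdim(5.0, 3.0), 2.0);
    assert_eq!(fdim(3.0, 5.0), 0.0);
    // NaN in either operand propagates.
    assert!(fdim(f64::NAN, 1.0).is_nan());
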
diff --git a/library/compiler-builtins/libm/src/math/floor.rs b/library/compiler-builtins/libm/src/math/floor.rs
index 3c5eab101d1..7241c427f64 100644
--- a/library/compiler-builtins/libm/src/math/floor.rs
+++ b/library/compiler-builtins/libm/src/math/floor.rs
@@ -2,7 +2,7 @@
 ///
 /// Finds the nearest integer less than or equal to `x`.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn floorf16(x: f16) -> f16 {
     return super::generic::floor(x);
 }
@@ -10,7 +10,7 @@ pub fn floorf16(x: f16) -> f16 {
 /// Floor (f64)
 ///
 /// Finds the nearest integer less than or equal to `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn floor(x: f64) -> f64 {
     select_implementation! {
         name: floor,
@@ -25,7 +25,7 @@ pub fn floor(x: f64) -> f64 {
 /// Floor (f32)
 ///
 /// Finds the nearest integer less than or equal to `x`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn floorf(x: f32) -> f32 {
     select_implementation! {
         name: floorf,
@@ -40,7 +40,7 @@ pub fn floorf(x: f32) -> f32 {
 ///
 /// Finds the nearest integer less than or equal to `x`.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn floorf128(x: f128) -> f128 {
     return super::generic::floor(x);
 }
diff --git a/library/compiler-builtins/libm/src/math/fma.rs b/library/compiler-builtins/libm/src/math/fma.rs
index 5bf473cfe06..70e6de768fa 100644
--- a/library/compiler-builtins/libm/src/math/fma.rs
+++ b/library/compiler-builtins/libm/src/math/fma.rs
@@ -7,7 +7,7 @@ use crate::support::Round;
 // Placeholder so we can have `fmaf16` in the `Float` trait.
 #[allow(unused)]
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn fmaf16(_x: f16, _y: f16, _z: f16) -> f16 {
     unimplemented!()
 }
@@ -15,7 +15,7 @@ pub(crate) fn fmaf16(_x: f16, _y: f16, _z: f16) -> f16 {
 /// Floating multiply add (f32)
 ///
 /// Computes `(x*y)+z`, rounded as one ternary operation (i.e. calculated with infinite precision).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaf(x: f32, y: f32, z: f32) -> f32 {
     select_implementation! {
         name: fmaf,
@@ -32,7 +32,7 @@ pub fn fmaf(x: f32, y: f32, z: f32) -> f32 {
 /// Fused multiply add (f64)
 ///
 /// Computes `(x*y)+z`, rounded as one ternary operation (i.e. calculated with infinite precision).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fma(x: f64, y: f64, z: f64) -> f64 {
     select_implementation! {
         name: fma,
@@ -50,7 +50,7 @@ pub fn fma(x: f64, y: f64, z: f64) -> f64 {
 ///
 /// Computes `(x*y)+z`, rounded as one ternary operation (i.e. calculated with infinite precision).
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaf128(x: f128, y: f128, z: f128) -> f128 {
     generic::fma_round(x, y, z, Round::Nearest).val
 }
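
Editor's note: the "one rounding" guarantee is what separates `fma` from a multiply followed by an add. A classic way to observe it in double precision (sketch):

    // 0.1 is not exactly representable, so the exact product 0.1_f64 * 10.0 is not
    // exactly 1.0 even though the separately rounded product is.
    let two_roundings = 0.1_f64 * 10.0 - 1.0; // product rounds to 1.0 first, so 0.0
    let one_rounding = fma(0.1, 10.0, -1.0);  // exact residual, 2^-54 ≈ 5.55e-17
    assert_eq!(two_roundings, 0.0);
    assert!(one_rounding != 0.0);
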
diff --git a/library/compiler-builtins/libm/src/math/fmin_fmax.rs b/library/compiler-builtins/libm/src/math/fmin_fmax.rs
index 481301994e9..c4c1b0435dd 100644
--- a/library/compiler-builtins/libm/src/math/fmin_fmax.rs
+++ b/library/compiler-builtins/libm/src/math/fmin_fmax.rs
@@ -3,7 +3,7 @@
 /// This coincides with IEEE 754-2011 `minNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminf16(x: f16, y: f16) -> f16 {
     super::generic::fmin(x, y)
 }
@@ -12,7 +12,7 @@ pub fn fminf16(x: f16, y: f16) -> f16 {
 ///
 /// This coincides with IEEE 754-2011 `minNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminf(x: f32, y: f32) -> f32 {
     super::generic::fmin(x, y)
 }
@@ -21,7 +21,7 @@ pub fn fminf(x: f32, y: f32) -> f32 {
 ///
 /// This coincides with IEEE 754-2011 `minNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmin(x: f64, y: f64) -> f64 {
     super::generic::fmin(x, y)
 }
@@ -31,7 +31,7 @@ pub fn fmin(x: f64, y: f64) -> f64 {
 /// This coincides with IEEE 754-2011 `minNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminf128(x: f128, y: f128) -> f128 {
     super::generic::fmin(x, y)
 }
@@ -41,7 +41,7 @@ pub fn fminf128(x: f128, y: f128) -> f128 {
 /// This coincides with IEEE 754-2011 `maxNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaxf16(x: f16, y: f16) -> f16 {
     super::generic::fmax(x, y)
 }
@@ -50,7 +50,7 @@ pub fn fmaxf16(x: f16, y: f16) -> f16 {
 ///
 /// This coincides with IEEE 754-2011 `maxNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaxf(x: f32, y: f32) -> f32 {
     super::generic::fmax(x, y)
 }
@@ -59,7 +59,7 @@ pub fn fmaxf(x: f32, y: f32) -> f32 {
 ///
 /// This coincides with IEEE 754-2011 `maxNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmax(x: f64, y: f64) -> f64 {
     super::generic::fmax(x, y)
 }
@@ -69,7 +69,7 @@ pub fn fmax(x: f64, y: f64) -> f64 {
 /// This coincides with IEEE 754-2011 `maxNum`. The result disregards signed zero (meaning if
 /// the inputs are -0.0 and +0.0, either may be returned).
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaxf128(x: f128, y: f128) -> f128 {
     super::generic::fmax(x, y)
 }
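
Editor's note: the `minNum`/`maxNum` behaviour means a single NaN operand is ignored rather than propagated (sketch):

    // With one NaN operand, the other operand is returned.
    assert_eq!(fmin(f64::NAN, 2.0), 2.0);
    assert_eq!(fmax(2.0, f64::NAN), 2.0);
    // Only when both operands are NaN is NaN returned.
    assert!(fmin(f64::NAN, f64::NAN).is_nan());
    // Signed zeros are not distinguished: either 0.0 or -0.0 may come back here.
    let _either = fmin(0.0, -0.0);
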
diff --git a/library/compiler-builtins/libm/src/math/fminimum_fmaximum.rs b/library/compiler-builtins/libm/src/math/fminimum_fmaximum.rs
index 8f130867051..a3c9c9c3991 100644
--- a/library/compiler-builtins/libm/src/math/fminimum_fmaximum.rs
+++ b/library/compiler-builtins/libm/src/math/fminimum_fmaximum.rs
@@ -2,7 +2,7 @@
 ///
 /// This coincides with IEEE 754-2019 `minimum`. The result orders -0.0 < 0.0.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimumf16(x: f16, y: f16) -> f16 {
     super::generic::fminimum(x, y)
 }
@@ -10,7 +10,7 @@ pub fn fminimumf16(x: f16, y: f16) -> f16 {
 /// Return the lesser of two arguments or, if either argument is NaN, NaN.
 ///
 /// This coincides with IEEE 754-2019 `minimum`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimum(x: f64, y: f64) -> f64 {
     super::generic::fminimum(x, y)
 }
@@ -18,7 +18,7 @@ pub fn fminimum(x: f64, y: f64) -> f64 {
 /// Return the lesser of two arguments or, if either argument is NaN, NaN.
 ///
 /// This coincides with IEEE 754-2019 `minimum`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimumf(x: f32, y: f32) -> f32 {
     super::generic::fminimum(x, y)
 }
@@ -27,7 +27,7 @@ pub fn fminimumf(x: f32, y: f32) -> f32 {
 ///
 /// This coincides with IEEE 754-2019 `minimum`. The result orders -0.0 < 0.0.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimumf128(x: f128, y: f128) -> f128 {
     super::generic::fminimum(x, y)
 }
@@ -36,7 +36,7 @@ pub fn fminimumf128(x: f128, y: f128) -> f128 {
 ///
 /// This coincides with IEEE 754-2019 `maximum`. The result orders -0.0 < 0.0.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximumf16(x: f16, y: f16) -> f16 {
     super::generic::fmaximum(x, y)
 }
@@ -44,7 +44,7 @@ pub fn fmaximumf16(x: f16, y: f16) -> f16 {
 /// Return the greater of two arguments or, if either argument is NaN, NaN.
 ///
 /// This coincides with IEEE 754-2019 `maximum`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximumf(x: f32, y: f32) -> f32 {
     super::generic::fmaximum(x, y)
 }
@@ -52,7 +52,7 @@ pub fn fmaximumf(x: f32, y: f32) -> f32 {
 /// Return the greater of two arguments or, if either argument is NaN, NaN.
 ///
 /// This coincides with IEEE 754-2019 `maximum`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximum(x: f64, y: f64) -> f64 {
     super::generic::fmaximum(x, y)
 }
@@ -61,7 +61,7 @@ pub fn fmaximum(x: f64, y: f64) -> f64 {
 ///
 /// This coincides with IEEE 754-2019 `maximum`. The result orders -0.0 < 0.0.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximumf128(x: f128, y: f128) -> f128 {
     super::generic::fmaximum(x, y)
 }
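
Editor's note: in contrast to `fmin`/`fmax` above, the IEEE 754-2019 `minimum`/`maximum` operations propagate NaN and order the zeros (sketch, following the standard's semantics):

    // NaN is contagious for fminimum/fmaximum...
    assert!(fminimum(f64::NAN, 2.0).is_nan());
    // ...and -0.0 is treated as strictly less than +0.0.
    assert_eq!(fminimum(0.0, -0.0).to_bits(), (-0.0_f64).to_bits());
    assert_eq!(fmaximum(0.0, -0.0).to_bits(), 0.0_f64.to_bits());
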
diff --git a/library/compiler-builtins/libm/src/math/fminimum_fmaximum_num.rs b/library/compiler-builtins/libm/src/math/fminimum_fmaximum_num.rs
index fadf934180a..612cefe756e 100644
--- a/library/compiler-builtins/libm/src/math/fminimum_fmaximum_num.rs
+++ b/library/compiler-builtins/libm/src/math/fminimum_fmaximum_num.rs
@@ -2,7 +2,7 @@
 ///
 /// This coincides with IEEE 754-2019 `minimumNumber`. The result orders -0.0 < 0.0.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimum_numf16(x: f16, y: f16) -> f16 {
     super::generic::fminimum_num(x, y)
 }
@@ -10,7 +10,7 @@ pub fn fminimum_numf16(x: f16, y: f16) -> f16 {
 /// Return the lesser of two arguments or, if either argument is NaN, the other argument.
 ///
 /// This coincides with IEEE 754-2019 `minimumNumber`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimum_numf(x: f32, y: f32) -> f32 {
     super::generic::fminimum_num(x, y)
 }
@@ -18,7 +18,7 @@ pub fn fminimum_numf(x: f32, y: f32) -> f32 {
 /// Return the lesser of two arguments or, if either argument is NaN, the other argument.
 ///
 /// This coincides with IEEE 754-2019 `minimumNumber`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimum_num(x: f64, y: f64) -> f64 {
     super::generic::fminimum_num(x, y)
 }
@@ -27,7 +27,7 @@ pub fn fminimum_num(x: f64, y: f64) -> f64 {
 ///
 /// This coincides with IEEE 754-2019 `minimumNumber`. The result orders -0.0 < 0.0.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fminimum_numf128(x: f128, y: f128) -> f128 {
     super::generic::fminimum_num(x, y)
 }
@@ -36,7 +36,7 @@ pub fn fminimum_numf128(x: f128, y: f128) -> f128 {
 ///
 /// This coincides with IEEE 754-2019 `maximumNumber`. The result orders -0.0 < 0.0.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximum_numf16(x: f16, y: f16) -> f16 {
     super::generic::fmaximum_num(x, y)
 }
@@ -44,7 +44,7 @@ pub fn fmaximum_numf16(x: f16, y: f16) -> f16 {
 /// Return the greater of two arguments or, if either argument is NaN, the other argument.
 ///
 /// This coincides with IEEE 754-2019 `maximumNumber`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximum_numf(x: f32, y: f32) -> f32 {
     super::generic::fmaximum_num(x, y)
 }
@@ -52,7 +52,7 @@ pub fn fmaximum_numf(x: f32, y: f32) -> f32 {
 /// Return the greater of two arguments or, if either argument is NaN, the other argument.
 ///
 /// This coincides with IEEE 754-2019 `maximumNumber`. The result orders -0.0 < 0.0.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximum_num(x: f64, y: f64) -> f64 {
     super::generic::fmaximum_num(x, y)
 }
@@ -61,7 +61,7 @@ pub fn fmaximum_num(x: f64, y: f64) -> f64 {
 ///
 /// This coincides with IEEE 754-2019 `maximumNumber`. The result orders -0.0 < 0.0.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmaximum_numf128(x: f128, y: f128) -> f128 {
     super::generic::fmaximum_num(x, y)
 }
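
Editor's note: the `_num` variants are the number-favouring counterparts — a NaN operand is treated as missing data unless both operands are NaN (sketch):

    // minimumNumber/maximumNumber return the numeric operand when one input is NaN.
    assert_eq!(fminimum_num(f64::NAN, 2.0), 2.0);
    assert_eq!(fmaximum_num(-1.0, f64::NAN), -1.0);
    // Zero ordering matches fminimum/fmaximum: -0.0 < +0.0.
    assert_eq!(fminimum_num(0.0, -0.0).to_bits(), (-0.0_f64).to_bits());
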
diff --git a/library/compiler-builtins/libm/src/math/fmod.rs b/library/compiler-builtins/libm/src/math/fmod.rs
index c4752b92578..6ae1be56083 100644
--- a/library/compiler-builtins/libm/src/math/fmod.rs
+++ b/library/compiler-builtins/libm/src/math/fmod.rs
@@ -1,25 +1,25 @@
 /// Calculate the remainder of `x / y`, the precise result of `x - trunc(x / y) * y`.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmodf16(x: f16, y: f16) -> f16 {
     super::generic::fmod(x, y)
 }
 
 /// Calculate the remainder of `x / y`, the precise result of `x - trunc(x / y) * y`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmodf(x: f32, y: f32) -> f32 {
     super::generic::fmod(x, y)
 }
 
 /// Calculate the remainder of `x / y`, the precise result of `x - trunc(x / y) * y`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmod(x: f64, y: f64) -> f64 {
     super::generic::fmod(x, y)
 }
 
 /// Calculate the remainder of `x / y`, the precise result of `x - trunc(x / y) * y`.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn fmodf128(x: f128, y: f128) -> f128 {
     super::generic::fmod(x, y)
 }
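
Editor's note: a couple of values to pin down the doc comment; the result is exact and keeps the sign of `x` because the quotient is truncated, not floored (sketch):

    assert_eq!(fmod(5.5, 2.0), 1.5);   // 5.5 - trunc(5.5/2.0)*2.0 = 5.5 - 4.0
    assert_eq!(fmod(-5.5, 2.0), -1.5); // truncation keeps x's sign
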
diff --git a/library/compiler-builtins/libm/src/math/frexp.rs b/library/compiler-builtins/libm/src/math/frexp.rs
index de7a64fdae1..932111eebc9 100644
--- a/library/compiler-builtins/libm/src/math/frexp.rs
+++ b/library/compiler-builtins/libm/src/math/frexp.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn frexp(x: f64) -> (f64, i32) {
     let mut y = x.to_bits();
     let ee = ((y >> 52) & 0x7ff) as i32;
diff --git a/library/compiler-builtins/libm/src/math/frexpf.rs b/library/compiler-builtins/libm/src/math/frexpf.rs
index 0ec91c2d350..904bf14f7b8 100644
--- a/library/compiler-builtins/libm/src/math/frexpf.rs
+++ b/library/compiler-builtins/libm/src/math/frexpf.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn frexpf(x: f32) -> (f32, i32) {
     let mut y = x.to_bits();
     let ee: i32 = ((y >> 23) & 0xff) as i32;
diff --git a/library/compiler-builtins/libm/src/math/hypot.rs b/library/compiler-builtins/libm/src/math/hypot.rs
index da458ea1d05..b92ee18ca11 100644
--- a/library/compiler-builtins/libm/src/math/hypot.rs
+++ b/library/compiler-builtins/libm/src/math/hypot.rs
@@ -17,7 +17,7 @@ fn sq(x: f64) -> (f64, f64) {
     (hi, lo)
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn hypot(mut x: f64, mut y: f64) -> f64 {
     let x1p700 = f64::from_bits(0x6bb0000000000000); // 0x1p700 === 2 ^ 700
     let x1p_700 = f64::from_bits(0x1430000000000000); // 0x1p-700 === 2 ^ -700
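
Editor's note: the `x1p700`/`x1p_700` constants are the rescaling that lets `hypot` avoid intermediate overflow and underflow; the pay-off is visible for large inputs (sketch):

    // Squaring the operands overflows f64 (1e600 > f64::MAX ≈ 1.8e308), so a
    // naive sqrt(x*x + y*y) would return infinity...
    assert!((1e300_f64 * 1e300).is_infinite());
    // ...while hypot rescales internally and stays finite (≈ 1.414e300).
    assert!(hypot(1e300, 1e300).is_finite());
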
diff --git a/library/compiler-builtins/libm/src/math/hypotf.rs b/library/compiler-builtins/libm/src/math/hypotf.rs
index 576eebb3343..e7635ffc9a0 100644
--- a/library/compiler-builtins/libm/src/math/hypotf.rs
+++ b/library/compiler-builtins/libm/src/math/hypotf.rs
@@ -2,7 +2,7 @@ use core::f32;
 
 use super::sqrtf;
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn hypotf(mut x: f32, mut y: f32) -> f32 {
     let x1p90 = f32::from_bits(0x6c800000); // 0x1p90f === 2 ^ 90
     let x1p_90 = f32::from_bits(0x12800000); // 0x1p-90f === 2 ^ -90
diff --git a/library/compiler-builtins/libm/src/math/ilogb.rs b/library/compiler-builtins/libm/src/math/ilogb.rs
index 5b41f7b1dc0..ef774f6ad3a 100644
--- a/library/compiler-builtins/libm/src/math/ilogb.rs
+++ b/library/compiler-builtins/libm/src/math/ilogb.rs
@@ -1,7 +1,7 @@
 const FP_ILOGBNAN: i32 = -1 - 0x7fffffff;
 const FP_ILOGB0: i32 = FP_ILOGBNAN;
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ilogb(x: f64) -> i32 {
     let mut i: u64 = x.to_bits();
     let e = ((i >> 52) & 0x7ff) as i32;
diff --git a/library/compiler-builtins/libm/src/math/ilogbf.rs b/library/compiler-builtins/libm/src/math/ilogbf.rs
index 3585d6d36f1..5b0cb46ec55 100644
--- a/library/compiler-builtins/libm/src/math/ilogbf.rs
+++ b/library/compiler-builtins/libm/src/math/ilogbf.rs
@@ -1,7 +1,7 @@
 const FP_ILOGBNAN: i32 = -1 - 0x7fffffff;
 const FP_ILOGB0: i32 = FP_ILOGBNAN;
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ilogbf(x: f32) -> i32 {
     let mut i = x.to_bits();
     let e = ((i >> 23) & 0xff) as i32;
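
Editor's note: `ilogb` extracts the unbiased binary exponent as an `i32`, with the sentinel constants above covering zero and NaN (sketch):

    assert_eq!(ilogb(8.0), 3);    // 8.0  = 1.0 * 2^3
    assert_eq!(ilogb(0.75), -1);  // 0.75 = 1.5 * 2^-1
    assert_eq!(ilogb(0.0), FP_ILOGB0);        // i32::MIN here
    assert_eq!(ilogb(f64::NAN), FP_ILOGBNAN);
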
diff --git a/library/compiler-builtins/libm/src/math/j0.rs b/library/compiler-builtins/libm/src/math/j0.rs
index 99d656f0d08..7b0800477b3 100644
--- a/library/compiler-builtins/libm/src/math/j0.rs
+++ b/library/compiler-builtins/libm/src/math/j0.rs
@@ -110,7 +110,7 @@ const S03: f64 = 5.13546550207318111446e-07; /* 0x3EA13B54, 0xCE84D5A9 */
 const S04: f64 = 1.16614003333790000205e-09; /* 0x3E1408BC, 0xF4745D8F */
 
 /// Zeroth order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the first kind (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn j0(mut x: f64) -> f64 {
     let z: f64;
     let r: f64;
@@ -165,7 +165,7 @@ const V03: f64 = 2.59150851840457805467e-07; /* 0x3E91642D, 0x7FF202FD */
 const V04: f64 = 4.41110311332675467403e-10; /* 0x3DFE5018, 0x3BD6D9EF */
 
 /// Zeroth order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the second kind (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn y0(x: f64) -> f64 {
     let z: f64;
     let u: f64;
diff --git a/library/compiler-builtins/libm/src/math/j0f.rs b/library/compiler-builtins/libm/src/math/j0f.rs
index 25e5b325c8c..1c6a7c34462 100644
--- a/library/compiler-builtins/libm/src/math/j0f.rs
+++ b/library/compiler-builtins/libm/src/math/j0f.rs
@@ -63,7 +63,7 @@ const S03: f32 = 5.1354652442e-07; /* 0x3509daa6 */
 const S04: f32 = 1.1661400734e-09; /* 0x30a045e8 */
 
 /// Zeroth order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the first kind (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn j0f(mut x: f32) -> f32 {
     let z: f32;
     let r: f32;
@@ -110,7 +110,7 @@ const V03: f32 = 2.5915085189e-07; /* 0x348b216c */
 const V04: f32 = 4.4111031494e-10; /* 0x2ff280c2 */
 
 /// Zeroth order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the second kind (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn y0f(x: f32) -> f32 {
     let z: f32;
     let u: f32;
diff --git a/library/compiler-builtins/libm/src/math/j1.rs b/library/compiler-builtins/libm/src/math/j1.rs
index 9b604d9e46e..7d304ba10b7 100644
--- a/library/compiler-builtins/libm/src/math/j1.rs
+++ b/library/compiler-builtins/libm/src/math/j1.rs
@@ -114,7 +114,7 @@ const S04: f64 = 5.04636257076217042715e-09; /* 0x3E35AC88, 0xC97DFF2C */
 const S05: f64 = 1.23542274426137913908e-11; /* 0x3DAB2ACF, 0xCFB97ED8 */
 
 /// First order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the first kind (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn j1(x: f64) -> f64 {
     let mut z: f64;
     let r: f64;
@@ -161,7 +161,7 @@ const V0: [f64; 5] = [
 ];
 
 /// First order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the second kind (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn y1(x: f64) -> f64 {
     let z: f64;
     let u: f64;
diff --git a/library/compiler-builtins/libm/src/math/j1f.rs b/library/compiler-builtins/libm/src/math/j1f.rs
index a47472401ee..cd829c1aa12 100644
--- a/library/compiler-builtins/libm/src/math/j1f.rs
+++ b/library/compiler-builtins/libm/src/math/j1f.rs
@@ -64,7 +64,7 @@ const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
 const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
 
 /// First order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the first kind (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn j1f(x: f32) -> f32 {
     let mut z: f32;
     let r: f32;
@@ -110,7 +110,7 @@ const V0: [f32; 5] = [
 ];
 
 /// First order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the second kind (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn y1f(x: f32) -> f32 {
     let z: f32;
     let u: f32;
@@ -361,8 +361,6 @@ fn qonef(x: f32) -> f32 {
     return (0.375 + r / s) / x;
 }
 
-// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
-#[cfg(not(target_arch = "powerpc64"))]
 #[cfg(test)]
 mod tests {
     use super::{j1f, y1f};
@@ -371,6 +369,7 @@ mod tests {
         // 0x401F3E49
         assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
     }
+
     #[test]
     fn test_y1f_2002() {
         //allow slightly different result on x87
diff --git a/library/compiler-builtins/libm/src/math/jn.rs b/library/compiler-builtins/libm/src/math/jn.rs
index 31f8d9c5382..b87aeaf1cc3 100644
--- a/library/compiler-builtins/libm/src/math/jn.rs
+++ b/library/compiler-builtins/libm/src/math/jn.rs
@@ -39,7 +39,7 @@ use super::{cos, fabs, get_high_word, get_low_word, j0, j1, log, sin, sqrt, y0,
 const INVSQRTPI: f64 = 5.64189583547756279280e-01; /* 0x3FE20DD7, 0x50429B6D */
 
 /// Integer order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the first kind (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn jn(n: i32, mut x: f64) -> f64 {
     let mut ix: u32;
     let lx: u32;
@@ -249,7 +249,7 @@ pub fn jn(n: i32, mut x: f64) -> f64 {
 }
 
 /// Integer order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the second kind (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn yn(n: i32, x: f64) -> f64 {
     let mut ix: u32;
     let lx: u32;
diff --git a/library/compiler-builtins/libm/src/math/jnf.rs b/library/compiler-builtins/libm/src/math/jnf.rs
index 52cf7d8a8bd..34fdc5112dc 100644
--- a/library/compiler-builtins/libm/src/math/jnf.rs
+++ b/library/compiler-builtins/libm/src/math/jnf.rs
@@ -16,7 +16,7 @@
 use super::{fabsf, j0f, j1f, logf, y0f, y1f};
 
 /// Integer order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the first kind (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn jnf(n: i32, mut x: f32) -> f32 {
     let mut ix: u32;
     let mut nm1: i32;
@@ -192,7 +192,7 @@ pub fn jnf(n: i32, mut x: f32) -> f32 {
 }
 
 /// Integer order of the [Bessel function](https://en.wikipedia.org/wiki/Bessel_function) of the second kind (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ynf(n: i32, x: f32) -> f32 {
     let mut ix: u32;
     let mut ib: u32;
diff --git a/library/compiler-builtins/libm/src/math/k_cos.rs b/library/compiler-builtins/libm/src/math/k_cos.rs
index 49b2fc64d86..1a2ebabe334 100644
--- a/library/compiler-builtins/libm/src/math/k_cos.rs
+++ b/library/compiler-builtins/libm/src/math/k_cos.rs
@@ -51,7 +51,7 @@ const C6: f64 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
 //         expression for cos().  Retention happens in all cases tested
 //         under FreeBSD, so don't pessimize things by forcibly clipping
 //         any extra precision in w.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_cos(x: f64, y: f64) -> f64 {
     let z = x * x;
     let w = z * z;
diff --git a/library/compiler-builtins/libm/src/math/k_cosf.rs b/library/compiler-builtins/libm/src/math/k_cosf.rs
index e99f2348c00..68f568c2425 100644
--- a/library/compiler-builtins/libm/src/math/k_cosf.rs
+++ b/library/compiler-builtins/libm/src/math/k_cosf.rs
@@ -20,7 +20,7 @@ const C1: f64 = 0.0416666233237390631894; /*  0x155553e1053a42.0p-57 */
 const C2: f64 = -0.00138867637746099294692; /* -0x16c087e80f1e27.0p-62 */
 const C3: f64 = 0.0000243904487962774090654; /*  0x199342e0ee5069.0p-68 */
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_cosf(x: f64) -> f32 {
     let z = x * x;
     let w = z * z;
diff --git a/library/compiler-builtins/libm/src/math/k_expo2.rs b/library/compiler-builtins/libm/src/math/k_expo2.rs
index 7345075f376..7b63952d255 100644
--- a/library/compiler-builtins/libm/src/math/k_expo2.rs
+++ b/library/compiler-builtins/libm/src/math/k_expo2.rs
@@ -4,7 +4,7 @@ use super::exp;
 const K: i32 = 2043;
 
 /* expf(x)/2 for x >= log(FLT_MAX), slightly better than 0.5f*expf(x/2)*expf(x/2) */
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_expo2(x: f64) -> f64 {
     let k_ln2 = f64::from_bits(0x40962066151add8b);
     /* note that k is odd and scale*scale overflows */
diff --git a/library/compiler-builtins/libm/src/math/k_expo2f.rs b/library/compiler-builtins/libm/src/math/k_expo2f.rs
index fbd7b27d583..02213cec454 100644
--- a/library/compiler-builtins/libm/src/math/k_expo2f.rs
+++ b/library/compiler-builtins/libm/src/math/k_expo2f.rs
@@ -4,7 +4,7 @@ use super::expf;
 const K: i32 = 235;
 
 /* expf(x)/2 for x >= log(FLT_MAX), slightly better than 0.5f*expf(x/2)*expf(x/2) */
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_expo2f(x: f32) -> f32 {
     let k_ln2 = f32::from_bits(0x4322e3bc);
     /* note that k is odd and scale*scale overflows */
diff --git a/library/compiler-builtins/libm/src/math/k_sin.rs b/library/compiler-builtins/libm/src/math/k_sin.rs
index 9dd96c94474..2f854294513 100644
--- a/library/compiler-builtins/libm/src/math/k_sin.rs
+++ b/library/compiler-builtins/libm/src/math/k_sin.rs
@@ -43,7 +43,7 @@ const S6: f64 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
 //              r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
 //         then                   3    2
 //              sin(x) = x + (S1*x + (x *(r-y/2)+y))
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_sin(x: f64, y: f64, iy: i32) -> f64 {
     let z = x * x;
     let w = z * z;
diff --git a/library/compiler-builtins/libm/src/math/k_sinf.rs b/library/compiler-builtins/libm/src/math/k_sinf.rs
index 88d10cababc..297d88bbbbe 100644
--- a/library/compiler-builtins/libm/src/math/k_sinf.rs
+++ b/library/compiler-builtins/libm/src/math/k_sinf.rs
@@ -20,7 +20,7 @@ const S2: f64 = 0.0083333293858894631756; /*  0x111110896efbb2.0p-59 */
 const S3: f64 = -0.000198393348360966317347; /* -0x1a00f9e2cae774.0p-65 */
 const S4: f64 = 0.0000027183114939898219064; /*  0x16cd878c3b46a7.0p-71 */
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_sinf(x: f64) -> f32 {
     let z = x * x;
     let w = z * z;
diff --git a/library/compiler-builtins/libm/src/math/k_tan.rs b/library/compiler-builtins/libm/src/math/k_tan.rs
index d177010bb0a..ac48d661fd6 100644
--- a/library/compiler-builtins/libm/src/math/k_tan.rs
+++ b/library/compiler-builtins/libm/src/math/k_tan.rs
@@ -58,7 +58,7 @@ static T: [f64; 13] = [
 const PIO4: f64 = 7.85398163397448278999e-01; /* 3FE921FB, 54442D18 */
 const PIO4_LO: f64 = 3.06161699786838301793e-17; /* 3C81A626, 33145C07 */
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_tan(mut x: f64, mut y: f64, odd: i32) -> f64 {
     let hx = (f64::to_bits(x) >> 32) as u32;
     let big = (hx & 0x7fffffff) >= 0x3FE59428; /* |x| >= 0.6744 */
diff --git a/library/compiler-builtins/libm/src/math/k_tanf.rs b/library/compiler-builtins/libm/src/math/k_tanf.rs
index af8db539dad..79382f57bf6 100644
--- a/library/compiler-builtins/libm/src/math/k_tanf.rs
+++ b/library/compiler-builtins/libm/src/math/k_tanf.rs
@@ -19,7 +19,7 @@ const T: [f64; 6] = [
     0.00946564784943673166728, /* 0x1362b9bf971bcd.0p-59 */
 ];
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn k_tanf(x: f64, odd: bool) -> f32 {
     let z = x * x;
     /*
diff --git a/library/compiler-builtins/libm/src/math/ldexp.rs b/library/compiler-builtins/libm/src/math/ldexp.rs
index 24899ba306a..b32b8d5241b 100644
--- a/library/compiler-builtins/libm/src/math/ldexp.rs
+++ b/library/compiler-builtins/libm/src/math/ldexp.rs
@@ -1,21 +1,21 @@
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ldexpf16(x: f16, n: i32) -> f16 {
     super::scalbnf16(x, n)
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ldexpf(x: f32, n: i32) -> f32 {
     super::scalbnf(x, n)
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ldexp(x: f64, n: i32) -> f64 {
     super::scalbn(x, n)
 }
 
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn ldexpf128(x: f128, n: i32) -> f128 {
     super::scalbnf128(x, n)
 }
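
Editor's note: these are thin aliases for `scalbn` — multiply by a power of two with the usual range handling (sketch):

    // ldexp(x, n) computes x * 2^n.
    assert_eq!(ldexp(0.75, 4), 12.0);
    assert_eq!(ldexpf(1.0, -1), 0.5);
    // Out-of-range exponents saturate to infinity (or flush toward zero).
    assert!(ldexp(1.0, 2000).is_infinite());
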
diff --git a/library/compiler-builtins/libm/src/math/lgamma.rs b/library/compiler-builtins/libm/src/math/lgamma.rs
index 8312dc18648..da7ce5c983b 100644
--- a/library/compiler-builtins/libm/src/math/lgamma.rs
+++ b/library/compiler-builtins/libm/src/math/lgamma.rs
@@ -2,7 +2,7 @@ use super::lgamma_r;
 
 /// The natural logarithm of the
 /// [Gamma function](https://en.wikipedia.org/wiki/Gamma_function) (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn lgamma(x: f64) -> f64 {
     lgamma_r(x).0
 }
diff --git a/library/compiler-builtins/libm/src/math/lgamma_r.rs b/library/compiler-builtins/libm/src/math/lgamma_r.rs
index 6becaad2ce9..38eb270f683 100644
--- a/library/compiler-builtins/libm/src/math/lgamma_r.rs
+++ b/library/compiler-builtins/libm/src/math/lgamma_r.rs
@@ -165,7 +165,7 @@ fn sin_pi(mut x: f64) -> f64 {
     }
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn lgamma_r(mut x: f64) -> (f64, i32) {
     let u: u64 = x.to_bits();
     let mut t: f64;
diff --git a/library/compiler-builtins/libm/src/math/lgammaf.rs b/library/compiler-builtins/libm/src/math/lgammaf.rs
index d37512397cb..920acfed2a0 100644
--- a/library/compiler-builtins/libm/src/math/lgammaf.rs
+++ b/library/compiler-builtins/libm/src/math/lgammaf.rs
@@ -2,7 +2,7 @@ use super::lgammaf_r;
 
 /// The natural logarithm of the
 /// [Gamma function](https://en.wikipedia.org/wiki/Gamma_function) (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn lgammaf(x: f32) -> f32 {
     lgammaf_r(x).0
 }
diff --git a/library/compiler-builtins/libm/src/math/lgammaf_r.rs b/library/compiler-builtins/libm/src/math/lgammaf_r.rs
index 10cecee541c..a0b6a678a67 100644
--- a/library/compiler-builtins/libm/src/math/lgammaf_r.rs
+++ b/library/compiler-builtins/libm/src/math/lgammaf_r.rs
@@ -100,7 +100,7 @@ fn sin_pi(mut x: f32) -> f32 {
     }
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn lgammaf_r(mut x: f32) -> (f32, i32) {
     let u = x.to_bits();
     let mut t: f32;
diff --git a/library/compiler-builtins/libm/src/math/log.rs b/library/compiler-builtins/libm/src/math/log.rs
index f2dc47ec5cc..9499c56d8ad 100644
--- a/library/compiler-builtins/libm/src/math/log.rs
+++ b/library/compiler-builtins/libm/src/math/log.rs
@@ -71,7 +71,7 @@ const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
 const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
 
 /// The natural logarithm of `x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log(mut x: f64) -> f64 {
     let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
 
diff --git a/library/compiler-builtins/libm/src/math/log10.rs b/library/compiler-builtins/libm/src/math/log10.rs
index 8c9d68c492d..29f25d944af 100644
--- a/library/compiler-builtins/libm/src/math/log10.rs
+++ b/library/compiler-builtins/libm/src/math/log10.rs
@@ -32,7 +32,7 @@ const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
 const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
 
 /// The base 10 logarithm of `x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log10(mut x: f64) -> f64 {
     let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
 
diff --git a/library/compiler-builtins/libm/src/math/log10f.rs b/library/compiler-builtins/libm/src/math/log10f.rs
index 18bf8fcc832..f89584bf9c9 100644
--- a/library/compiler-builtins/libm/src/math/log10f.rs
+++ b/library/compiler-builtins/libm/src/math/log10f.rs
@@ -26,7 +26,7 @@ const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
 const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
 
 /// The base 10 logarithm of `x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log10f(mut x: f32) -> f32 {
     let x1p25f = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
 
diff --git a/library/compiler-builtins/libm/src/math/log1p.rs b/library/compiler-builtins/libm/src/math/log1p.rs
index 65142c0d622..c991cce60df 100644
--- a/library/compiler-builtins/libm/src/math/log1p.rs
+++ b/library/compiler-builtins/libm/src/math/log1p.rs
@@ -66,7 +66,7 @@ const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
 const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
 
 /// The natural logarithm of 1+`x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log1p(x: f64) -> f64 {
     let mut ui: u64 = x.to_bits();
     let hfsq: f64;
diff --git a/library/compiler-builtins/libm/src/math/log1pf.rs b/library/compiler-builtins/libm/src/math/log1pf.rs
index 23978e61c3c..89a92fac98e 100644
--- a/library/compiler-builtins/libm/src/math/log1pf.rs
+++ b/library/compiler-builtins/libm/src/math/log1pf.rs
@@ -21,7 +21,7 @@ const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
 const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
 
 /// The natural logarithm of 1+`x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log1pf(x: f32) -> f32 {
     let mut ui: u32 = x.to_bits();
     let hfsq: f32;
diff --git a/library/compiler-builtins/libm/src/math/log2.rs b/library/compiler-builtins/libm/src/math/log2.rs
index 701f63c25e7..9b750c9a2a6 100644
--- a/library/compiler-builtins/libm/src/math/log2.rs
+++ b/library/compiler-builtins/libm/src/math/log2.rs
@@ -30,7 +30,7 @@ const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
 const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
 
 /// The base 2 logarithm of `x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log2(mut x: f64) -> f64 {
     let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
 
diff --git a/library/compiler-builtins/libm/src/math/log2f.rs b/library/compiler-builtins/libm/src/math/log2f.rs
index 5ba2427d1d4..0e5177d7afa 100644
--- a/library/compiler-builtins/libm/src/math/log2f.rs
+++ b/library/compiler-builtins/libm/src/math/log2f.rs
@@ -24,7 +24,7 @@ const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
 const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
 
 /// The base 2 logarithm of `x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn log2f(mut x: f32) -> f32 {
     let x1p25f = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
 
diff --git a/library/compiler-builtins/libm/src/math/logf.rs b/library/compiler-builtins/libm/src/math/logf.rs
index 68d1943025e..cd7a7b0ba00 100644
--- a/library/compiler-builtins/libm/src/math/logf.rs
+++ b/library/compiler-builtins/libm/src/math/logf.rs
@@ -22,7 +22,7 @@ const LG3: f32 = 0.28498786688; /*  0x91e9ee.0p-25 */
 const LG4: f32 = 0.24279078841; /*  0xf89e26.0p-26 */
 
 /// The natural logarithm of `x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn logf(mut x: f32) -> f32 {
     let x1p25 = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
 
diff --git a/library/compiler-builtins/libm/src/math/mod.rs b/library/compiler-builtins/libm/src/math/mod.rs
index ce9b8fc58bb..8eecfe5667d 100644
--- a/library/compiler-builtins/libm/src/math/mod.rs
+++ b/library/compiler-builtins/libm/src/math/mod.rs
@@ -1,3 +1,5 @@
+#![allow(clippy::approx_constant)] // many false positives
+
 macro_rules! force_eval {
     ($e:expr) => {
         unsafe { ::core::ptr::read_volatile(&$e) }
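
Editor's note: for context, `force_eval!` wraps a volatile read so the optimiser cannot drop an expression that is evaluated purely for its floating-point side effects; the new `allow` silences `clippy::approx_constant` on this module, which the comment notes are largely false positives. A hypothetical use of the macro (sketch; `tiny` is an illustrative name, not from this patch):

    // Evaluate the product only for its side effects (e.g. raising the
    // underflow/inexact flags); the result itself is unused.
    force_eval!(tiny * tiny);
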
diff --git a/library/compiler-builtins/libm/src/math/modf.rs b/library/compiler-builtins/libm/src/math/modf.rs
index 6541862cdd9..a92a83dc5d1 100644
--- a/library/compiler-builtins/libm/src/math/modf.rs
+++ b/library/compiler-builtins/libm/src/math/modf.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn modf(x: f64) -> (f64, f64) {
     let rv2: f64;
     let mut u = x.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/modff.rs b/library/compiler-builtins/libm/src/math/modff.rs
index 90c6bca7d8d..691f351ca8d 100644
--- a/library/compiler-builtins/libm/src/math/modff.rs
+++ b/library/compiler-builtins/libm/src/math/modff.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn modff(x: f32) -> (f32, f32) {
     let rv2: f32;
     let mut u: u32 = x.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/nextafter.rs b/library/compiler-builtins/libm/src/math/nextafter.rs
index c991ff6f233..f4408468cc9 100644
--- a/library/compiler-builtins/libm/src/math/nextafter.rs
+++ b/library/compiler-builtins/libm/src/math/nextafter.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn nextafter(x: f64, y: f64) -> f64 {
     if x.is_nan() || y.is_nan() {
         return x + y;
diff --git a/library/compiler-builtins/libm/src/math/nextafterf.rs b/library/compiler-builtins/libm/src/math/nextafterf.rs
index 8ba3833562f..c15eb9de281 100644
--- a/library/compiler-builtins/libm/src/math/nextafterf.rs
+++ b/library/compiler-builtins/libm/src/math/nextafterf.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn nextafterf(x: f32, y: f32) -> f32 {
     if x.is_nan() || y.is_nan() {
         return x + y;
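
Editor's note: apart from the NaN short-circuit visible above, `nextafter` steps by exactly one representable value toward `y` (sketch):

    // One ulp above 1.0 is 1.0 + f64::EPSILON.
    assert_eq!(nextafter(1.0, 2.0), 1.0 + f64::EPSILON);
    // When y == x, x is returned unchanged.
    assert_eq!(nextafter(1.0, 1.0), 1.0);
    // Either operand being NaN yields NaN (the `x + y` above).
    assert!(nextafterf(f32::NAN, 0.0).is_nan());
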
diff --git a/library/compiler-builtins/libm/src/math/pow.rs b/library/compiler-builtins/libm/src/math/pow.rs
index 94ae31cf0da..914d68cfce1 100644
--- a/library/compiler-builtins/libm/src/math/pow.rs
+++ b/library/compiler-builtins/libm/src/math/pow.rs
@@ -90,7 +90,7 @@ const IVLN2_H: f64 = 1.44269502162933349609e+00; /* 0x3ff71547_60000000 =24b 1/l
 const IVLN2_L: f64 = 1.92596299112661746887e-08; /* 0x3e54ae0b_f85ddf44 =1/ln2 tail*/
 
 /// Returns `x` to the power of `y` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn pow(x: f64, y: f64) -> f64 {
     let t1: f64;
     let t2: f64;
diff --git a/library/compiler-builtins/libm/src/math/powf.rs b/library/compiler-builtins/libm/src/math/powf.rs
index 11c7a7cbd94..17772ae872d 100644
--- a/library/compiler-builtins/libm/src/math/powf.rs
+++ b/library/compiler-builtins/libm/src/math/powf.rs
@@ -46,7 +46,7 @@ const IVLN2_H: f32 = 1.4426879883e+00;
 const IVLN2_L: f32 = 7.0526075433e-06;
 
 /// Returns `x` to the power of `y` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn powf(x: f32, y: f32) -> f32 {
     let mut z: f32;
     let mut ax: f32;
diff --git a/library/compiler-builtins/libm/src/math/rem_pio2.rs b/library/compiler-builtins/libm/src/math/rem_pio2.rs
index d677fd9dcb3..61b1030275a 100644
--- a/library/compiler-builtins/libm/src/math/rem_pio2.rs
+++ b/library/compiler-builtins/libm/src/math/rem_pio2.rs
@@ -41,7 +41,7 @@ const PIO2_3T: f64 = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
 // use rem_pio2_large() for large x
 //
 // caller must handle the case when reduction is not needed: |x| ~<= pi/4 */
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn rem_pio2(x: f64) -> (i32, f64, f64) {
     let x1p24 = f64::from_bits(0x4170000000000000);
 
@@ -195,7 +195,7 @@ mod tests {
 
     #[test]
     // FIXME(correctness): inaccurate results on i586
-    #[cfg_attr(all(target_arch = "x86", not(target_feature = "sse")), ignore)]
+    #[cfg_attr(x86_no_sse, ignore)]
     fn test_near_pi() {
         let arg = 3.141592025756836;
         let arg = force_eval!(arg);
diff --git a/library/compiler-builtins/libm/src/math/rem_pio2_large.rs b/library/compiler-builtins/libm/src/math/rem_pio2_large.rs
index 6d679bbe98c..f1fdf3673a8 100644
--- a/library/compiler-builtins/libm/src/math/rem_pio2_large.rs
+++ b/library/compiler-builtins/libm/src/math/rem_pio2_large.rs
@@ -11,7 +11,7 @@
  * ====================================================
  */
 
-use super::{floor, scalbn};
+use super::scalbn;
 
 // initial value for jk
 const INIT_JK: [usize; 4] = [3, 4, 4, 6];
@@ -221,8 +221,16 @@ const PIO2: [f64; 8] = [
 /// skip the part of the product that is known to be a huge integer (
 /// more accurately, = 0 mod 8 ). Thus the number of operations are
 /// independent of the exponent of the input.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn rem_pio2_large(x: &[f64], y: &mut [f64], e0: i32, prec: usize) -> i32 {
+    // FIXME(rust-lang/rust#144518): Inline assembly would cause `no_panic` to fail
+    // on the callers of this function. As a workaround, avoid inlining `floor` here
+    // when implemented with assembly.
+    #[cfg_attr(x86_no_sse, inline(never))]
+    extern "C" fn floor(x: f64) -> f64 {
+        super::floor(x)
+    }
+
     let x1p24 = f64::from_bits(0x4170000000000000); // 0x1p24 === 2 ^ 24
     let x1p_24 = f64::from_bits(0x3e70000000000000); // 0x1p_24 === 2 ^ (-24)
 
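The shim above keeps an assembly-backed `floor` out of this function's inlined body so that `no_panic` checks on callers are not tripped by the inline assembly (rust-lang/rust#144518). The same pattern in isolation, with illustrative names and the standard-library `f64::floor` standing in for the asm implementation:

    fn caller(x: f64) -> f64 {
        // Force a real call so the possibly-asm implementation is not
        // inlined into `caller`, leaving its panic-freedom analysis intact.
        #[inline(never)]
        extern "C" fn floor_shim(x: f64) -> f64 {
            x.floor()
        }
        floor_shim(x) * 0.5
    }

    fn main() {
        assert_eq!(caller(3.7), 1.5);
    }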
diff --git a/library/compiler-builtins/libm/src/math/rem_pio2f.rs b/library/compiler-builtins/libm/src/math/rem_pio2f.rs
index 3c658fe3dbc..0472a10355a 100644
--- a/library/compiler-builtins/libm/src/math/rem_pio2f.rs
+++ b/library/compiler-builtins/libm/src/math/rem_pio2f.rs
@@ -31,7 +31,7 @@ const PIO2_1T: f64 = 1.58932547735281966916e-08; /* 0x3E5110b4, 0x611A6263 */
 ///
 /// use double precision for everything except passing x
 /// use __rem_pio2_large() for large x
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub(crate) fn rem_pio2f(x: f32) -> (i32, f64) {
     let x64 = x as f64;
 
diff --git a/library/compiler-builtins/libm/src/math/remainder.rs b/library/compiler-builtins/libm/src/math/remainder.rs
index 9e966c9ed7f..54152df32f1 100644
--- a/library/compiler-builtins/libm/src/math/remainder.rs
+++ b/library/compiler-builtins/libm/src/math/remainder.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn remainder(x: f64, y: f64) -> f64 {
     let (result, _) = super::remquo(x, y);
     result
diff --git a/library/compiler-builtins/libm/src/math/remainderf.rs b/library/compiler-builtins/libm/src/math/remainderf.rs
index b1407cf2ace..21f62921428 100644
--- a/library/compiler-builtins/libm/src/math/remainderf.rs
+++ b/library/compiler-builtins/libm/src/math/remainderf.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn remainderf(x: f32, y: f32) -> f32 {
     let (result, _) = super::remquof(x, y);
     result
diff --git a/library/compiler-builtins/libm/src/math/remquo.rs b/library/compiler-builtins/libm/src/math/remquo.rs
index 4c11e848746..f13b092373e 100644
--- a/library/compiler-builtins/libm/src/math/remquo.rs
+++ b/library/compiler-builtins/libm/src/math/remquo.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn remquo(mut x: f64, mut y: f64) -> (f64, i32) {
     let ux: u64 = x.to_bits();
     let mut uy: u64 = y.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/remquof.rs b/library/compiler-builtins/libm/src/math/remquof.rs
index b0e85ca6611..cc7863a096f 100644
--- a/library/compiler-builtins/libm/src/math/remquof.rs
+++ b/library/compiler-builtins/libm/src/math/remquof.rs
@@ -1,4 +1,4 @@
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn remquof(mut x: f32, mut y: f32) -> (f32, i32) {
     let ux: u32 = x.to_bits();
     let mut uy: u32 = y.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/rint.rs b/library/compiler-builtins/libm/src/math/rint.rs
index e1c32c94355..011a7ae3d60 100644
--- a/library/compiler-builtins/libm/src/math/rint.rs
+++ b/library/compiler-builtins/libm/src/math/rint.rs
@@ -2,7 +2,7 @@ use super::support::Round;
 
 /// Round `x` to the nearest integer, breaking ties toward even.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn rintf16(x: f16) -> f16 {
     select_implementation! {
         name: rintf16,
@@ -14,7 +14,7 @@ pub fn rintf16(x: f16) -> f16 {
 }
 
 /// Round `x` to the nearest integer, breaking ties toward even.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn rintf(x: f32) -> f32 {
     select_implementation! {
         name: rintf,
@@ -29,7 +29,7 @@ pub fn rintf(x: f32) -> f32 {
 }
 
 /// Round `x` to the nearest integer, breaking ties toward even.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn rint(x: f64) -> f64 {
     select_implementation! {
         name: rint,
@@ -45,7 +45,7 @@ pub fn rint(x: f64) -> f64 {
 
 /// Round `x` to the nearest integer, breaking ties toward even.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn rintf128(x: f128) -> f128 {
     super::generic::rint_round(x, Round::Nearest).val
 }
diff --git a/library/compiler-builtins/libm/src/math/round.rs b/library/compiler-builtins/libm/src/math/round.rs
index 6cd091cd73c..256197e6ccb 100644
--- a/library/compiler-builtins/libm/src/math/round.rs
+++ b/library/compiler-builtins/libm/src/math/round.rs
@@ -1,25 +1,25 @@
 /// Round `x` to the nearest integer, breaking ties away from zero.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundf16(x: f16) -> f16 {
     super::generic::round(x)
 }
 
 /// Round `x` to the nearest integer, breaking ties away from zero.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundf(x: f32) -> f32 {
     super::generic::round(x)
 }
 
 /// Round `x` to the nearest integer, breaking ties away from zero.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn round(x: f64) -> f64 {
     super::generic::round(x)
 }
 
 /// Round `x` to the nearest integer, breaking ties away from zero.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundf128(x: f128) -> f128 {
     super::generic::round(x)
 }
diff --git a/library/compiler-builtins/libm/src/math/roundeven.rs b/library/compiler-builtins/libm/src/math/roundeven.rs
index 6e621d7628f..f0d67d41076 100644
--- a/library/compiler-builtins/libm/src/math/roundeven.rs
+++ b/library/compiler-builtins/libm/src/math/roundeven.rs
@@ -3,21 +3,21 @@ use super::support::{Float, Round};
 /// Round `x` to the nearest integer, breaking ties toward even. This is IEEE 754
 /// `roundToIntegralTiesToEven`.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundevenf16(x: f16) -> f16 {
     roundeven_impl(x)
 }
 
 /// Round `x` to the nearest integer, breaking ties toward even. This is IEEE 754
 /// `roundToIntegralTiesToEven`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundevenf(x: f32) -> f32 {
     roundeven_impl(x)
 }
 
 /// Round `x` to the nearest integer, breaking ties toward even. This is IEEE 754
 /// `roundToIntegralTiesToEven`.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundeven(x: f64) -> f64 {
     roundeven_impl(x)
 }
@@ -25,7 +25,7 @@ pub fn roundeven(x: f64) -> f64 {
 /// Round `x` to the nearest integer, breaking ties toward even. This is IEEE 754
 /// `roundToIntegralTiesToEven`.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn roundevenf128(x: f128) -> f128 {
     roundeven_impl(x)
 }
diff --git a/library/compiler-builtins/libm/src/math/scalbn.rs b/library/compiler-builtins/libm/src/math/scalbn.rs
index ed73c3f94f0..f1a67cb7f82 100644
--- a/library/compiler-builtins/libm/src/math/scalbn.rs
+++ b/library/compiler-builtins/libm/src/math/scalbn.rs
@@ -1,21 +1,21 @@
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn scalbnf16(x: f16, n: i32) -> f16 {
     super::generic::scalbn(x, n)
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn scalbnf(x: f32, n: i32) -> f32 {
     super::generic::scalbn(x, n)
 }
 
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn scalbn(x: f64, n: i32) -> f64 {
     super::generic::scalbn(x, n)
 }
 
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn scalbnf128(x: f128, n: i32) -> f128 {
     super::generic::scalbn(x, n)
 }
diff --git a/library/compiler-builtins/libm/src/math/sin.rs b/library/compiler-builtins/libm/src/math/sin.rs
index 229fa4bef08..5378a7bc387 100644
--- a/library/compiler-builtins/libm/src/math/sin.rs
+++ b/library/compiler-builtins/libm/src/math/sin.rs
@@ -44,7 +44,7 @@ use super::{k_cos, k_sin, rem_pio2};
 /// The sine of `x` (f64).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sin(x: f64) -> f64 {
     let x1p120 = f64::from_bits(0x4770000000000000); // 0x1p120f === 2 ^ 120
 
diff --git a/library/compiler-builtins/libm/src/math/sincos.rs b/library/compiler-builtins/libm/src/math/sincos.rs
index ebf482f2df3..a364f73759d 100644
--- a/library/compiler-builtins/libm/src/math/sincos.rs
+++ b/library/compiler-builtins/libm/src/math/sincos.rs
@@ -15,7 +15,7 @@ use super::{get_high_word, k_cos, k_sin, rem_pio2};
 /// Both the sine and cosine of `x` (f64).
 ///
 /// `x` is specified in radians and the return value is (sin(x), cos(x)).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sincos(x: f64) -> (f64, f64) {
     let s: f64;
     let c: f64;
diff --git a/library/compiler-builtins/libm/src/math/sincosf.rs b/library/compiler-builtins/libm/src/math/sincosf.rs
index f3360767683..c4beb5267f2 100644
--- a/library/compiler-builtins/libm/src/math/sincosf.rs
+++ b/library/compiler-builtins/libm/src/math/sincosf.rs
@@ -26,7 +26,7 @@ const S4PIO2: f64 = 4.0 * PI_2; /* 0x401921FB, 0x54442D18 */
 /// Both the sine and cosine of `x` (f32).
 ///
 /// `x` is specified in radians and the return value is (sin(x), cos(x)).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sincosf(x: f32) -> (f32, f32) {
     let s: f32;
     let c: f32;
diff --git a/library/compiler-builtins/libm/src/math/sinf.rs b/library/compiler-builtins/libm/src/math/sinf.rs
index 709b63fcf29..b4edf6769d3 100644
--- a/library/compiler-builtins/libm/src/math/sinf.rs
+++ b/library/compiler-builtins/libm/src/math/sinf.rs
@@ -27,7 +27,7 @@ const S4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
 /// The sine of `x` (f32).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sinf(x: f32) -> f32 {
     let x64 = x as f64;
 
diff --git a/library/compiler-builtins/libm/src/math/sinh.rs b/library/compiler-builtins/libm/src/math/sinh.rs
index 79184198263..900dd6ca4d8 100644
--- a/library/compiler-builtins/libm/src/math/sinh.rs
+++ b/library/compiler-builtins/libm/src/math/sinh.rs
@@ -6,7 +6,7 @@ use super::{expm1, expo2};
 //
 
 /// The hyperbolic sine of `x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sinh(x: f64) -> f64 {
     // union {double f; uint64_t i;} u = {.f = x};
     // uint32_t w;
diff --git a/library/compiler-builtins/libm/src/math/sinhf.rs b/library/compiler-builtins/libm/src/math/sinhf.rs
index 44d2e3560d5..501acea3028 100644
--- a/library/compiler-builtins/libm/src/math/sinhf.rs
+++ b/library/compiler-builtins/libm/src/math/sinhf.rs
@@ -1,7 +1,7 @@
 use super::{expm1f, k_expo2f};
 
 /// The hyperbolic sine of `x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sinhf(x: f32) -> f32 {
     let mut h = 0.5f32;
     let mut ix = x.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/sqrt.rs b/library/compiler-builtins/libm/src/math/sqrt.rs
index 76bc240cf01..7ba1bc9b32b 100644
--- a/library/compiler-builtins/libm/src/math/sqrt.rs
+++ b/library/compiler-builtins/libm/src/math/sqrt.rs
@@ -1,6 +1,6 @@
 /// The square root of `x` (f16).
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sqrtf16(x: f16) -> f16 {
     select_implementation! {
         name: sqrtf16,
@@ -12,7 +12,7 @@ pub fn sqrtf16(x: f16) -> f16 {
 }
 
 /// The square root of `x` (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sqrtf(x: f32) -> f32 {
     select_implementation! {
         name: sqrtf,
@@ -28,7 +28,7 @@ pub fn sqrtf(x: f32) -> f32 {
 }
 
 /// The square root of `x` (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sqrt(x: f64) -> f64 {
     select_implementation! {
         name: sqrt,
@@ -45,7 +45,7 @@ pub fn sqrt(x: f64) -> f64 {
 
 /// The square root of `x` (f128).
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn sqrtf128(x: f128) -> f128 {
     return super::generic::sqrt(x);
 }
diff --git a/library/compiler-builtins/libm/src/math/support/mod.rs b/library/compiler-builtins/libm/src/math/support/mod.rs
index 2e7edd03c42..b2d7bd8d556 100644
--- a/library/compiler-builtins/libm/src/math/support/mod.rs
+++ b/library/compiler-builtins/libm/src/math/support/mod.rs
@@ -11,7 +11,8 @@ mod int_traits;
 
 #[allow(unused_imports)]
 pub use big::{i256, u256};
-#[allow(unused_imports)]
+// Clippy seems to have a false positive
+#[allow(unused_imports, clippy::single_component_path_imports)]
 pub(crate) use cfg_if;
 pub use env::{FpResult, Round, Status};
 #[allow(unused_imports)]
diff --git a/library/compiler-builtins/libm/src/math/tan.rs b/library/compiler-builtins/libm/src/math/tan.rs
index a072bdec56e..79c1bad563e 100644
--- a/library/compiler-builtins/libm/src/math/tan.rs
+++ b/library/compiler-builtins/libm/src/math/tan.rs
@@ -43,7 +43,7 @@ use super::{k_tan, rem_pio2};
 /// The tangent of `x` (f64).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn tan(x: f64) -> f64 {
     let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
 
diff --git a/library/compiler-builtins/libm/src/math/tanf.rs b/library/compiler-builtins/libm/src/math/tanf.rs
index 8bcf9581ff6..a615573d87a 100644
--- a/library/compiler-builtins/libm/src/math/tanf.rs
+++ b/library/compiler-builtins/libm/src/math/tanf.rs
@@ -27,7 +27,7 @@ const T4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
 /// The tangent of `x` (f32).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn tanf(x: f32) -> f32 {
     let x64 = x as f64;
 
diff --git a/library/compiler-builtins/libm/src/math/tanh.rs b/library/compiler-builtins/libm/src/math/tanh.rs
index cc0abe4fcb2..c99cc2a70b1 100644
--- a/library/compiler-builtins/libm/src/math/tanh.rs
+++ b/library/compiler-builtins/libm/src/math/tanh.rs
@@ -8,7 +8,7 @@ use super::expm1;
 /// The hyperbolic tangent of `x` (f64).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn tanh(mut x: f64) -> f64 {
     let mut uf: f64 = x;
     let mut ui: u64 = f64::to_bits(uf);
diff --git a/library/compiler-builtins/libm/src/math/tanhf.rs b/library/compiler-builtins/libm/src/math/tanhf.rs
index fffbba6c6ec..3cbd5917f07 100644
--- a/library/compiler-builtins/libm/src/math/tanhf.rs
+++ b/library/compiler-builtins/libm/src/math/tanhf.rs
@@ -3,7 +3,7 @@ use super::expm1f;
 /// The hyperbolic tangent of `x` (f32).
 ///
 /// `x` is specified in radians.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn tanhf(mut x: f32) -> f32 {
     /* x = |x| */
     let mut ix = x.to_bits();
diff --git a/library/compiler-builtins/libm/src/math/tgamma.rs b/library/compiler-builtins/libm/src/math/tgamma.rs
index 3059860646a..41415d9d125 100644
--- a/library/compiler-builtins/libm/src/math/tgamma.rs
+++ b/library/compiler-builtins/libm/src/math/tgamma.rs
@@ -131,7 +131,7 @@ fn s(x: f64) -> f64 {
 }
 
 /// The [Gamma function](https://en.wikipedia.org/wiki/Gamma_function) (f64).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn tgamma(mut x: f64) -> f64 {
     let u: u64 = x.to_bits();
     let absx: f64;
diff --git a/library/compiler-builtins/libm/src/math/tgammaf.rs b/library/compiler-builtins/libm/src/math/tgammaf.rs
index fe178f7a3c0..a63a2a31862 100644
--- a/library/compiler-builtins/libm/src/math/tgammaf.rs
+++ b/library/compiler-builtins/libm/src/math/tgammaf.rs
@@ -1,7 +1,7 @@
 use super::tgamma;
 
 /// The [Gamma function](https://en.wikipedia.org/wiki/Gamma_function) (f32).
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn tgammaf(x: f32) -> f32 {
     tgamma(x as f64) as f32
 }
diff --git a/library/compiler-builtins/libm/src/math/trunc.rs b/library/compiler-builtins/libm/src/math/trunc.rs
index fa50d55e136..20d52a111a1 100644
--- a/library/compiler-builtins/libm/src/math/trunc.rs
+++ b/library/compiler-builtins/libm/src/math/trunc.rs
@@ -2,7 +2,7 @@
 ///
 /// This effectively removes the decimal part of the number, leaving the integral part.
 #[cfg(f16_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn truncf16(x: f16) -> f16 {
     super::generic::trunc(x)
 }
@@ -10,7 +10,7 @@ pub fn truncf16(x: f16) -> f16 {
 /// Rounds the number toward 0 to the closest integral value (f32).
 ///
 /// This effectively removes the decimal part of the number, leaving the integral part.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn truncf(x: f32) -> f32 {
     select_implementation! {
         name: truncf,
@@ -24,7 +24,7 @@ pub fn truncf(x: f32) -> f32 {
 /// Rounds the number toward 0 to the closest integral value (f64).
 ///
 /// This effectively removes the decimal part of the number, leaving the integral part.
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn trunc(x: f64) -> f64 {
     select_implementation! {
         name: trunc,
@@ -39,7 +39,7 @@ pub fn trunc(x: f64) -> f64 {
 ///
 /// This effectively removes the decimal part of the number, leaving the integral part.
 #[cfg(f128_enabled)]
-#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+#[cfg_attr(assert_no_panic, no_panic::no_panic)]
 pub fn truncf128(x: f128) -> f128 {
     super::generic::trunc(x)
 }
diff --git a/library/compiler-builtins/triagebot.toml b/library/compiler-builtins/triagebot.toml
index 8a2356c2b1c..eba5cdd88b9 100644
--- a/library/compiler-builtins/triagebot.toml
+++ b/library/compiler-builtins/triagebot.toml
@@ -19,6 +19,3 @@ check-commits = false
 # Enable issue transfers within the org
 # Documentation at: https://forge.rust-lang.org/triagebot/transfer.html
 [transfer]
-
-# Automatically close and reopen PRs made by bots to run CI on them
-[bot-pull-requests]
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 49275975f04..cd5fd77f865 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -226,10 +226,10 @@ impl Layout {
 
     /// Creates a `NonNull` that is dangling, but well-aligned for this Layout.
     ///
-    /// Note that the pointer value may potentially represent a valid pointer,
-    /// which means this must not be used as a "not yet initialized"
-    /// sentinel value. Types that lazily allocate must track initialization by
-    /// some other means.
+    /// Note that the address of the returned pointer may potentially
+    /// be that of a valid pointer, which means this must not be used
+    /// as a "not yet initialized" sentinel value.
+    /// Types that lazily allocate must track initialization by some other means.
     #[unstable(feature = "alloc_layout_extra", issue = "55724")]
     #[must_use]
     #[inline]
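The reworded note stresses that the address of `Layout::dangling()` may coincide with a real allocation, so the pointer cannot double as an "uninitialized" sentinel. A hedged sketch of what the doc asks for, using the unstable `alloc_layout_extra` API named above and an illustrative type that tracks initialization separately:

    #![feature(alloc_layout_extra)]
    use std::alloc::Layout;
    use std::ptr::NonNull;

    struct Lazy {
        ptr: NonNull<u8>,
        initialized: bool, // tracked explicitly, never inferred from `ptr`
    }

    fn new_lazy() -> Lazy {
        // Well-aligned but dangling; its address may equal that of a live
        // allocation, so it carries no "not yet initialized" meaning.
        Lazy { ptr: Layout::new::<u8>().dangling(), initialized: false }
    }

    fn main() {
        let lazy = new_lazy();
        assert!(!lazy.initialized);
        let _ = lazy.ptr;
    }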
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 38393379a78..ceb9748e7fe 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -725,7 +725,7 @@ unsafe impl Send for TypeId {}
 unsafe impl Sync for TypeId {}
 
 #[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
 impl const PartialEq for TypeId {
     #[inline]
     fn eq(&self, other: &Self) -> bool {
@@ -773,7 +773,7 @@ impl TypeId {
     /// ```
     #[must_use]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+    #[rustc_const_stable(feature = "const_type_id", since = "CURRENT_RUSTC_VERSION")]
     pub const fn of<T: ?Sized + 'static>() -> TypeId {
         const { intrinsics::type_id::<T>() }
     }
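With `TypeId::of` becoming callable in const contexts (and `PartialEq for TypeId` moving under the `const_cmp` gate), type identities can be precomputed at compile time once this stabilization lands. A small hedged sketch:

    use std::any::TypeId;

    // Precomputed at compile time once `TypeId::of` is const-stable.
    const ID_OF_U32: TypeId = TypeId::of::<u32>();

    fn is_u32<T: 'static>() -> bool {
        TypeId::of::<T>() == ID_OF_U32
    }

    fn main() {
        assert!(is_u32::<u32>());
        assert!(!is_u32::<i64>());
    }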
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index 1c23218552a..b3a498570f9 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -378,7 +378,7 @@ impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] {
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 impl<T, I, const N: usize> const Index<I> for [T; N]
 where
-    [T]: ~const Index<I>,
+    [T]: [const] Index<I>,
 {
     type Output = <[T] as Index<I>>::Output;
 
@@ -392,7 +392,7 @@ where
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 impl<T, I, const N: usize> const IndexMut<I> for [T; N]
 where
-    [T]: ~const IndexMut<I>,
+    [T]: [const] IndexMut<I>,
 {
     #[inline]
     fn index_mut(&mut self, index: I) -> &mut Self::Output {
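`[T]: [const] Index<I>` is the updated surface syntax for the conditionally-const bounds these impls previously spelled `~const`: the array impl is usable in const contexts exactly when the slice impl is. A hedged sketch of the same shape on an illustrative trait, assuming a nightly compiler with `const_trait_impl` and the new bound syntax:

    #![feature(const_trait_impl)]

    #[const_trait]
    trait Describe {
        fn describe(&self) -> usize;
    }

    struct Wrapper<T>(T);

    // Const only when `T: Describe` is itself const; still callable at runtime.
    impl<T> const Describe for Wrapper<T>
    where
        T: [const] Describe,
    {
        fn describe(&self) -> usize {
            self.0.describe()
        }
    }

    impl Describe for u8 {
        fn describe(&self) -> usize {
            1
        }
    }

    fn main() {
        assert_eq!(Wrapper(7u8).describe(), 1);
    }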
diff --git a/library/core/src/ascii/ascii_char.rs b/library/core/src/ascii/ascii_char.rs
index 054ddf84470..419e4694594 100644
--- a/library/core/src/ascii/ascii_char.rs
+++ b/library/core/src/ascii/ascii_char.rs
@@ -445,7 +445,15 @@ pub enum AsciiChar {
 }
 
 impl AsciiChar {
-    /// Creates an ascii character from the byte `b`,
+    /// The character with the lowest ASCII code.
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    pub const MIN: Self = Self::Null;
+
+    /// The character with the highest ASCII code.
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    pub const MAX: Self = Self::Delete;
+
+    /// Creates an ASCII character from the byte `b`,
     /// or returns `None` if it's too large.
     #[unstable(feature = "ascii_char", issue = "110998")]
     #[inline]
@@ -540,6 +548,608 @@ impl AsciiChar {
     pub const fn as_str(&self) -> &str {
         crate::slice::from_ref(self).as_str()
     }
+
+    /// Makes a copy of the value in its upper case equivalent.
+    ///
+    /// Letters 'a' to 'z' are mapped to 'A' to 'Z'.
+    ///
+    /// To uppercase the value in-place, use [`make_uppercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let lowercase_a = ascii::Char::SmallA;
+    ///
+    /// assert_eq!(
+    ///     ascii::Char::CapitalA,
+    ///     lowercase_a.to_uppercase(),
+    /// );
+    /// ```
+    ///
+    /// [`make_uppercase`]: Self::make_uppercase
+    #[must_use = "to uppercase the value in-place, use `make_uppercase()`"]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn to_uppercase(self) -> Self {
+        let uppercase_byte = self.to_u8().to_ascii_uppercase();
+        // SAFETY: Toggling the 6th bit won't convert ASCII to non-ASCII.
+        unsafe { Self::from_u8_unchecked(uppercase_byte) }
+    }
+
+    /// Makes a copy of the value in its lower case equivalent.
+    ///
+    /// Letters 'A' to 'Z' are mapped to 'a' to 'z'.
+    ///
+    /// To lowercase the value in-place, use [`make_lowercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    ///
+    /// assert_eq!(
+    ///     ascii::Char::SmallA,
+    ///     uppercase_a.to_lowercase(),
+    /// );
+    /// ```
+    ///
+    /// [`make_lowercase`]: Self::make_lowercase
+    #[must_use = "to lowercase the value in-place, use `make_lowercase()`"]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn to_lowercase(self) -> Self {
+        let lowercase_byte = self.to_u8().to_ascii_lowercase();
+        // SAFETY: Setting the 6th bit won't convert ASCII to non-ASCII.
+        unsafe { Self::from_u8_unchecked(lowercase_byte) }
+    }
+
+    /// Checks that two values are a case-insensitive match.
+    ///
+    /// This is equivalent to `to_lowercase(a) == to_lowercase(b)`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let lowercase_a = ascii::Char::SmallA;
+    /// let uppercase_a = ascii::Char::CapitalA;
+    ///
+    /// assert!(lowercase_a.eq_ignore_case(uppercase_a));
+    /// ```
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn eq_ignore_case(self, other: Self) -> bool {
+        // FIXME(const-hack) `arg.to_u8().to_ascii_lowercase()` -> `arg.to_lowercase()`
+        // once `PartialEq` is const for `Self`.
+        self.to_u8().to_ascii_lowercase() == other.to_u8().to_ascii_lowercase()
+    }
+
+    /// Converts this value to its upper case equivalent in-place.
+    ///
+    /// Letters 'a' to 'z' are mapped to 'A' to 'Z'.
+    ///
+    /// To return a new uppercased value without modifying the existing one, use
+    /// [`to_uppercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let mut letter_a = ascii::Char::SmallA;
+    ///
+    /// letter_a.make_uppercase();
+    ///
+    /// assert_eq!(ascii::Char::CapitalA, letter_a);
+    /// ```
+    ///
+    /// [`to_uppercase`]: Self::to_uppercase
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn make_uppercase(&mut self) {
+        *self = self.to_uppercase();
+    }
+
+    /// Converts this value to its lower case equivalent in-place.
+    ///
+    /// Letters 'A' to 'Z' are mapped to 'a' to 'z'.
+    ///
+    /// To return a new lowercased value without modifying the existing one, use
+    /// [`to_lowercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let mut letter_a = ascii::Char::CapitalA;
+    ///
+    /// letter_a.make_lowercase();
+    ///
+    /// assert_eq!(ascii::Char::SmallA, letter_a);
+    /// ```
+    ///
+    /// [`to_lowercase`]: Self::to_lowercase
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn make_lowercase(&mut self) {
+        *self = self.to_lowercase();
+    }
+
+    /// Checks if the value is an alphabetic character:
+    ///
+    /// - 0x41 'A' ..= 0x5A 'Z', or
+    /// - 0x61 'a' ..= 0x7A 'z'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(uppercase_a.is_alphabetic());
+    /// assert!(uppercase_g.is_alphabetic());
+    /// assert!(a.is_alphabetic());
+    /// assert!(g.is_alphabetic());
+    /// assert!(!zero.is_alphabetic());
+    /// assert!(!percent.is_alphabetic());
+    /// assert!(!space.is_alphabetic());
+    /// assert!(!lf.is_alphabetic());
+    /// assert!(!esc.is_alphabetic());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_alphabetic(self) -> bool {
+        self.to_u8().is_ascii_alphabetic()
+    }
+
+    /// Checks if the value is an uppercase character:
+    /// 0x41 'A' ..= 0x5A 'Z'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(uppercase_a.is_uppercase());
+    /// assert!(uppercase_g.is_uppercase());
+    /// assert!(!a.is_uppercase());
+    /// assert!(!g.is_uppercase());
+    /// assert!(!zero.is_uppercase());
+    /// assert!(!percent.is_uppercase());
+    /// assert!(!space.is_uppercase());
+    /// assert!(!lf.is_uppercase());
+    /// assert!(!esc.is_uppercase());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_uppercase(self) -> bool {
+        self.to_u8().is_ascii_uppercase()
+    }
+
+    /// Checks if the value is a lowercase character:
+    /// 0x61 'a' ..= 0x7A 'z'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(!uppercase_a.is_lowercase());
+    /// assert!(!uppercase_g.is_lowercase());
+    /// assert!(a.is_lowercase());
+    /// assert!(g.is_lowercase());
+    /// assert!(!zero.is_lowercase());
+    /// assert!(!percent.is_lowercase());
+    /// assert!(!space.is_lowercase());
+    /// assert!(!lf.is_lowercase());
+    /// assert!(!esc.is_lowercase());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_lowercase(self) -> bool {
+        self.to_u8().is_ascii_lowercase()
+    }
+
+    /// Checks if the value is an alphanumeric character:
+    ///
+    /// - 0x41 'A' ..= 0x5A 'Z', or
+    /// - 0x61 'a' ..= 0x7A 'z', or
+    /// - 0x30 '0' ..= 0x39 '9'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(uppercase_a.is_alphanumeric());
+    /// assert!(uppercase_g.is_alphanumeric());
+    /// assert!(a.is_alphanumeric());
+    /// assert!(g.is_alphanumeric());
+    /// assert!(zero.is_alphanumeric());
+    /// assert!(!percent.is_alphanumeric());
+    /// assert!(!space.is_alphanumeric());
+    /// assert!(!lf.is_alphanumeric());
+    /// assert!(!esc.is_alphanumeric());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_alphanumeric(self) -> bool {
+        self.to_u8().is_ascii_alphanumeric()
+    }
+
+    /// Checks if the value is a decimal digit:
+    /// 0x30 '0' ..= 0x39 '9'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(!uppercase_a.is_digit());
+    /// assert!(!uppercase_g.is_digit());
+    /// assert!(!a.is_digit());
+    /// assert!(!g.is_digit());
+    /// assert!(zero.is_digit());
+    /// assert!(!percent.is_digit());
+    /// assert!(!space.is_digit());
+    /// assert!(!lf.is_digit());
+    /// assert!(!esc.is_digit());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_digit(self) -> bool {
+        self.to_u8().is_ascii_digit()
+    }
+
+    /// Checks if the value is an octal digit:
+    /// 0x30 '0' ..= 0x37 '7'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants, is_ascii_octdigit)]
+    ///
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let a = ascii::Char::SmallA;
+    /// let zero = ascii::Char::Digit0;
+    /// let seven = ascii::Char::Digit7;
+    /// let eight = ascii::Char::Digit8;
+    /// let percent = ascii::Char::PercentSign;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(!uppercase_a.is_octdigit());
+    /// assert!(!a.is_octdigit());
+    /// assert!(zero.is_octdigit());
+    /// assert!(seven.is_octdigit());
+    /// assert!(!eight.is_octdigit());
+    /// assert!(!percent.is_octdigit());
+    /// assert!(!lf.is_octdigit());
+    /// assert!(!esc.is_octdigit());
+    /// ```
+    #[must_use]
+    // This is blocked on two unstable features. Please ensure both are
+    // stabilized before marking this method as stable.
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    // #[unstable(feature = "is_ascii_octdigit", issue = "101288")]
+    #[inline]
+    pub const fn is_octdigit(self) -> bool {
+        self.to_u8().is_ascii_octdigit()
+    }
+
+    /// Checks if the value is a hexadecimal digit:
+    ///
+    /// - 0x30 '0' ..= 0x39 '9', or
+    /// - 0x41 'A' ..= 0x46 'F', or
+    /// - 0x61 'a' ..= 0x66 'f'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(uppercase_a.is_hexdigit());
+    /// assert!(!uppercase_g.is_hexdigit());
+    /// assert!(a.is_hexdigit());
+    /// assert!(!g.is_hexdigit());
+    /// assert!(zero.is_hexdigit());
+    /// assert!(!percent.is_hexdigit());
+    /// assert!(!space.is_hexdigit());
+    /// assert!(!lf.is_hexdigit());
+    /// assert!(!esc.is_hexdigit());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_hexdigit(self) -> bool {
+        self.to_u8().is_ascii_hexdigit()
+    }
+
+    /// Checks if the value is a punctuation character:
+    ///
+    /// - 0x21 ..= 0x2F `! " # $ % & ' ( ) * + , - . /`, or
+    /// - 0x3A ..= 0x40 `: ; < = > ? @`, or
+    /// - 0x5B ..= 0x60 `` [ \ ] ^ _ ` ``, or
+    /// - 0x7B ..= 0x7E `{ | } ~`
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(!uppercase_a.is_punctuation());
+    /// assert!(!uppercase_g.is_punctuation());
+    /// assert!(!a.is_punctuation());
+    /// assert!(!g.is_punctuation());
+    /// assert!(!zero.is_punctuation());
+    /// assert!(percent.is_punctuation());
+    /// assert!(!space.is_punctuation());
+    /// assert!(!lf.is_punctuation());
+    /// assert!(!esc.is_punctuation());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_punctuation(self) -> bool {
+        self.to_u8().is_ascii_punctuation()
+    }
+
+    /// Checks if the value is a graphic character:
+    /// 0x21 '!' ..= 0x7E '~'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(uppercase_a.is_graphic());
+    /// assert!(uppercase_g.is_graphic());
+    /// assert!(a.is_graphic());
+    /// assert!(g.is_graphic());
+    /// assert!(zero.is_graphic());
+    /// assert!(percent.is_graphic());
+    /// assert!(!space.is_graphic());
+    /// assert!(!lf.is_graphic());
+    /// assert!(!esc.is_graphic());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_graphic(self) -> bool {
+        self.to_u8().is_ascii_graphic()
+    }
+
+    /// Checks if the value is a whitespace character:
+    /// 0x20 SPACE, 0x09 HORIZONTAL TAB, 0x0A LINE FEED,
+    /// 0x0C FORM FEED, or 0x0D CARRIAGE RETURN.
+    ///
+    /// Rust uses the WhatWG Infra Standard's [definition of ASCII
+    /// whitespace][infra-aw]. There are several other definitions in
+    /// wide use. For instance, [the POSIX locale][pct] includes
+    /// 0x0B VERTICAL TAB as well as all the above characters,
+    /// but—from the very same specification—[the default rule for
+    /// "field splitting" in the Bourne shell][bfs] considers *only*
+    /// SPACE, HORIZONTAL TAB, and LINE FEED as whitespace.
+    ///
+    /// If you are writing a program that will process an existing
+    /// file format, check what that format's definition of whitespace is
+    /// before using this function.
+    ///
+    /// [infra-aw]: https://infra.spec.whatwg.org/#ascii-whitespace
+    /// [pct]: https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap07.html#tag_07_03_01
+    /// [bfs]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(!uppercase_a.is_whitespace());
+    /// assert!(!uppercase_g.is_whitespace());
+    /// assert!(!a.is_whitespace());
+    /// assert!(!g.is_whitespace());
+    /// assert!(!zero.is_whitespace());
+    /// assert!(!percent.is_whitespace());
+    /// assert!(space.is_whitespace());
+    /// assert!(lf.is_whitespace());
+    /// assert!(!esc.is_whitespace());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_whitespace(self) -> bool {
+        self.to_u8().is_ascii_whitespace()
+    }
+
+    /// Checks if the value is a control character:
+    /// 0x00 NUL ..= 0x1F UNIT SEPARATOR, or 0x7F DELETE.
+    /// Note that most whitespace characters are control
+    /// characters, but SPACE is not.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let uppercase_a = ascii::Char::CapitalA;
+    /// let uppercase_g = ascii::Char::CapitalG;
+    /// let a = ascii::Char::SmallA;
+    /// let g = ascii::Char::SmallG;
+    /// let zero = ascii::Char::Digit0;
+    /// let percent = ascii::Char::PercentSign;
+    /// let space = ascii::Char::Space;
+    /// let lf = ascii::Char::LineFeed;
+    /// let esc = ascii::Char::Escape;
+    ///
+    /// assert!(!uppercase_a.is_control());
+    /// assert!(!uppercase_g.is_control());
+    /// assert!(!a.is_control());
+    /// assert!(!g.is_control());
+    /// assert!(!zero.is_control());
+    /// assert!(!percent.is_control());
+    /// assert!(!space.is_control());
+    /// assert!(lf.is_control());
+    /// assert!(esc.is_control());
+    /// ```
+    #[must_use]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub const fn is_control(self) -> bool {
+        self.to_u8().is_ascii_control()
+    }
+
+    /// Returns an iterator that produces an escaped version of a
+    /// character.
+    ///
+    /// The behavior is identical to
+    /// [`ascii::escape_default`](crate::ascii::escape_default).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ascii_char, ascii_char_variants)]
+    /// use std::ascii;
+    ///
+    /// let zero = ascii::Char::Digit0;
+    /// let tab = ascii::Char::CharacterTabulation;
+    /// let cr = ascii::Char::CarriageReturn;
+    /// let lf = ascii::Char::LineFeed;
+    /// let apostrophe = ascii::Char::Apostrophe;
+    /// let double_quote = ascii::Char::QuotationMark;
+    /// let backslash = ascii::Char::ReverseSolidus;
+    ///
+    /// assert_eq!("0", zero.escape_ascii().to_string());
+    /// assert_eq!("\\t", tab.escape_ascii().to_string());
+    /// assert_eq!("\\r", cr.escape_ascii().to_string());
+    /// assert_eq!("\\n", lf.escape_ascii().to_string());
+    /// assert_eq!("\\'", apostrophe.escape_ascii().to_string());
+    /// assert_eq!("\\\"", double_quote.escape_ascii().to_string());
+    /// assert_eq!("\\\\", backslash.escape_ascii().to_string());
+    /// ```
+    #[must_use = "this returns the escaped character as an iterator, \
+                  without modifying the original"]
+    #[unstable(feature = "ascii_char", issue = "110998")]
+    #[inline]
+    pub fn escape_ascii(self) -> super::EscapeDefault {
+        super::escape_default(self.to_u8())
+    }
 }
 
 macro_rules! into_int_impl {
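The new constants and methods on `ascii::Char` are all thin wrappers over the corresponding `u8` ASCII helpers, so they classify and case-map exactly like the byte APIs. A short hedged usage sketch on nightly, with the same feature gates the doctests above use:

    #![feature(ascii_char, ascii_char_variants)]
    use std::ascii;

    fn main() {
        let mut c = ascii::Char::SmallA;
        assert!(c.is_alphabetic() && c.is_lowercase() && c.is_hexdigit());

        c.make_uppercase();
        assert_eq!(ascii::Char::CapitalA, c);
        assert!(c.eq_ignore_case(ascii::Char::SmallA));

        assert_eq!(ascii::Char::MIN, ascii::Char::Null);
        assert_eq!(ascii::Char::MAX, ascii::Char::Delete);
    }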
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index d67408cae1b..c639d50cc3d 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -334,7 +334,7 @@ impl<T: Copy> Clone for Cell<T> {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_const_unstable(feature = "const_default", issue = "143894")]
-impl<T: ~const Default> const Default for Cell<T> {
+impl<T: [const] Default> const Default for Cell<T> {
     /// Creates a `Cell<T>`, with the `Default` value for T.
     #[inline]
     fn default() -> Cell<T> {
@@ -1325,7 +1325,7 @@ impl<T: Clone> Clone for RefCell<T> {
 
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_const_unstable(feature = "const_default", issue = "143894")]
-impl<T: ~const Default> const Default for RefCell<T> {
+impl<T: [const] Default> const Default for RefCell<T> {
     /// Creates a `RefCell<T>`, with the `Default` value for T.
     #[inline]
     fn default() -> RefCell<T> {
@@ -2333,7 +2333,7 @@ impl<T: ?Sized> UnsafeCell<T> {
 
 #[stable(feature = "unsafe_cell_default", since = "1.10.0")]
 #[rustc_const_unstable(feature = "const_default", issue = "143894")]
-impl<T: ~const Default> const Default for UnsafeCell<T> {
+impl<T: [const] Default> const Default for UnsafeCell<T> {
     /// Creates an `UnsafeCell`, with the `Default` value for T.
     fn default() -> UnsafeCell<T> {
         UnsafeCell::new(Default::default())
@@ -2438,7 +2438,7 @@ impl<T: ?Sized> SyncUnsafeCell<T> {
 
 #[unstable(feature = "sync_unsafe_cell", issue = "95439")]
 #[rustc_const_unstable(feature = "const_default", issue = "143894")]
-impl<T: ~const Default> const Default for SyncUnsafeCell<T> {
+impl<T: [const] Default> const Default for SyncUnsafeCell<T> {
     /// Creates a `SyncUnsafeCell`, with the `Default` value for T.
     fn default() -> SyncUnsafeCell<T> {
         SyncUnsafeCell::new(Default::default())
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index b0752a85faf..7ee0962721f 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -920,7 +920,11 @@ impl char {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[inline]
     pub fn is_alphanumeric(self) -> bool {
-        self.is_alphabetic() || self.is_numeric()
+        if self.is_ascii() {
+            self.is_ascii_alphanumeric()
+        } else {
+            unicode::Alphabetic(self) || unicode::N(self)
+        }
     }
 
     /// Returns `true` if this `char` has the general category for control codes.
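The new body short-circuits on ASCII so the common case is a pair of range checks, and the Unicode property tables are only consulted for non-ASCII scalars. The same two-tier shape restated as a free function (illustrative, not the std internals):

    fn is_alphanumeric_like(c: char) -> bool {
        if c.is_ascii() {
            c.is_ascii_alphanumeric() // cheap byte-range checks
        } else {
            c.is_alphabetic() || c.is_numeric() // Unicode table lookups
        }
    }

    fn main() {
        assert!(is_alphanumeric_like('A'));
        assert!(is_alphanumeric_like('٣')); // ARABIC-INDIC DIGIT THREE
        assert!(!is_alphanumeric_like('%'));
    }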
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index 51d037ddfd2..0add77b2bc8 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -139,6 +139,34 @@ mod uninit;
 /// // Note: With the manual implementations the above line will compile.
 /// ```
 ///
+/// ## `Clone` and `PartialEq`/`Eq`
+/// `Clone` is intended for the duplication of objects. Consequently, when implementing
+/// both `Clone` and [`PartialEq`], the following property is expected to hold:
+/// ```text
+/// x == x -> x.clone() == x
+/// ```
+/// In other words, if an object compares equal to itself,
+/// its clone must also compare equal to the original.
+///
+/// For types that also implement [`Eq`] – for which `x == x` always holds –
+/// this implies that `x.clone() == x` must always be true.
+/// Standard library collections such as
+/// [`HashMap`], [`HashSet`], [`BTreeMap`], [`BTreeSet`] and [`BinaryHeap`]
+/// rely on their keys respecting this property for correct behavior.
+/// Furthermore, these collections require that cloning a key preserves the outcome of the
+/// [`Hash`] and [`Ord`] methods. Thankfully, this follows automatically from `x.clone() == x`
+/// if `Hash` and `Ord` are correctly implemented according to their own requirements.
+///
+/// When deriving both `Clone` and [`PartialEq`] using `#[derive(Clone, PartialEq)]`
+/// or when additionally deriving [`Eq`] using `#[derive(Clone, PartialEq, Eq)]`,
+/// then this property is automatically upheld – provided that it is satisfied by
+/// the underlying types.
+///
+/// Violating this property is a logic error. The behavior resulting from a logic error is not
+/// specified, but users of the trait must ensure that such logic errors do *not* result in
+/// undefined behavior. This means that `unsafe` code **must not** rely on this property
+/// being satisfied.
+///
 /// ## Additional implementors
 ///
 /// In addition to the [implementors listed below][impls],
@@ -152,6 +180,11 @@ mod uninit;
 ///   (even if the referent doesn't),
 ///   while variables captured by mutable reference never implement `Clone`.
 ///
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+/// [`HashSet`]: ../../std/collections/struct.HashSet.html
+/// [`BTreeMap`]: ../../std/collections/struct.BTreeMap.html
+/// [`BTreeSet`]: ../../std/collections/struct.BTreeSet.html
+/// [`BinaryHeap`]: ../../std/collections/struct.BinaryHeap.html
 /// [impls]: #implementors
 #[stable(feature = "rust1", since = "1.0.0")]
 #[lang = "clone"]
@@ -212,7 +245,7 @@ pub trait Clone: Sized {
     #[stable(feature = "rust1", since = "1.0.0")]
     fn clone_from(&mut self, source: &Self)
     where
-        Self: ~const Destruct,
+        Self: [const] Destruct,
     {
         *self = source.clone()
     }
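The added `Clone`/`PartialEq` section documents the `x == x -> x.clone() == x` expectation; derived impls satisfy it automatically, and a violation is a logic error rather than undefined behavior. A hedged illustration of the kind of hand-written impl the text warns against (illustrative type):

    #[derive(PartialEq, Eq, Hash, Debug)]
    struct Counter {
        id: u32,
        generation: u32,
    }

    impl Clone for Counter {
        fn clone(&self) -> Self {
            // Breaks the documented property: the clone compares unequal to
            // the original, which misbehaves as a HashMap/BTreeMap key.
            Counter { id: self.id, generation: self.generation + 1 }
        }
    }

    fn main() {
        let a = Counter { id: 1, generation: 0 };
        assert_ne!(a.clone(), a);
    }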
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 1b9af10a6fd..a64fade285b 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -2022,7 +2022,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
     impl<A: PointeeSized, B: PointeeSized> const PartialEq<&B> for &A
     where
-        A: ~const PartialEq<B>,
+        A: [const] PartialEq<B>,
     {
         #[inline]
         fn eq(&self, other: &&B) -> bool {
@@ -2094,7 +2094,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
     impl<A: PointeeSized, B: PointeeSized> const PartialEq<&mut B> for &mut A
     where
-        A: ~const PartialEq<B>,
+        A: [const] PartialEq<B>,
     {
         #[inline]
         fn eq(&self, other: &&mut B) -> bool {
@@ -2164,7 +2164,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
     impl<A: PointeeSized, B: PointeeSized> const PartialEq<&mut B> for &A
     where
-        A: ~const PartialEq<B>,
+        A: [const] PartialEq<B>,
     {
         #[inline]
         fn eq(&self, other: &&mut B) -> bool {
@@ -2180,7 +2180,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
     impl<A: PointeeSized, B: PointeeSized> const PartialEq<&B> for &mut A
     where
-        A: ~const PartialEq<B>,
+        A: [const] PartialEq<B>,
     {
         #[inline]
         fn eq(&self, other: &&B) -> bool {
diff --git a/library/core/src/cmp/bytewise.rs b/library/core/src/cmp/bytewise.rs
index 7d61c9345ec..a06a6e8b69a 100644
--- a/library/core/src/cmp/bytewise.rs
+++ b/library/core/src/cmp/bytewise.rs
@@ -19,7 +19,7 @@ use crate::num::NonZero;
 #[rustc_specialization_trait]
 #[const_trait]
 pub(crate) unsafe trait BytewiseEq<Rhs = Self>:
-    ~const PartialEq<Rhs> + Sized
+    [const] PartialEq<Rhs> + Sized
 {
 }
 
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index 220a24caf09..0c3034c3d4c 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -717,7 +717,7 @@ pub trait TryFrom<T>: Sized {
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
 impl<T: PointeeSized, U: PointeeSized> const AsRef<U> for &T
 where
-    T: ~const AsRef<U>,
+    T: [const] AsRef<U>,
 {
     #[inline]
     fn as_ref(&self) -> &U {
@@ -730,7 +730,7 @@ where
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
 impl<T: PointeeSized, U: PointeeSized> const AsRef<U> for &mut T
 where
-    T: ~const AsRef<U>,
+    T: [const] AsRef<U>,
 {
     #[inline]
     fn as_ref(&self) -> &U {
@@ -751,7 +751,7 @@ where
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
 impl<T: PointeeSized, U: PointeeSized> const AsMut<U> for &mut T
 where
-    T: ~const AsMut<U>,
+    T: [const] AsMut<U>,
 {
     #[inline]
     fn as_mut(&mut self) -> &mut U {
@@ -772,7 +772,7 @@ where
 #[rustc_const_unstable(feature = "const_from", issue = "143773")]
 impl<T, U> const Into<U> for T
 where
-    U: ~const From<T>,
+    U: [const] From<T>,
 {
     /// Calls `U::from(self)`.
     ///
@@ -816,7 +816,7 @@ impl<T> const From<!> for T {
 #[rustc_const_unstable(feature = "const_from", issue = "143773")]
 impl<T, U> const TryInto<U> for T
 where
-    U: ~const TryFrom<T>,
+    U: [const] TryFrom<T>,
 {
     type Error = U::Error;
 
@@ -832,7 +832,7 @@ where
 #[rustc_const_unstable(feature = "const_from", issue = "143773")]
 impl<T, U> const TryFrom<U> for T
 where
-    U: ~const Into<T>,
+    U: [const] Into<T>,
 {
     type Error = Infallible;
 
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index c43f3834630..e6b599fafcf 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -652,7 +652,7 @@ impl CStr {
     }
 }
 
-#[stable(feature = "c_string_eq_c_str", since = "CURRENT_RUSTC_VERSION")]
+#[stable(feature = "c_string_eq_c_str", since = "1.90.0")]
 impl PartialEq<&Self> for CStr {
     #[inline]
     fn eq(&self, other: &&Self) -> bool {
diff --git a/library/core/src/internal_macros.rs b/library/core/src/internal_macros.rs
index 2aaefba2468..f90818c7969 100644
--- a/library/core/src/internal_macros.rs
+++ b/library/core/src/internal_macros.rs
@@ -1,13 +1,9 @@
 // implements the unary operator "op &T"
 // based on "op T" where T is expected to be `Copy`able
 macro_rules! forward_ref_unop {
-    (impl $imp:ident, $method:ident for $t:ty) => {
-        forward_ref_unop!(impl $imp, $method for $t,
-                #[stable(feature = "rust1", since = "1.0.0")]);
-    };
-    (impl $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
-        #[$attr]
-        impl $imp for &$t {
+    (impl $imp:ident, $method:ident for $t:ty, $(#[$attr:meta])+) => {
+        $(#[$attr])+
+        impl const $imp for &$t {
             type Output = <$t as $imp>::Output;
 
             #[inline]
@@ -21,13 +17,9 @@ macro_rules! forward_ref_unop {
 // implements binary operators "&T op U", "T op &U", "&T op &U"
 // based on "T op U" where T and U are expected to be `Copy`able
 macro_rules! forward_ref_binop {
-    (impl $imp:ident, $method:ident for $t:ty, $u:ty) => {
-        forward_ref_binop!(impl $imp, $method for $t, $u,
-                #[stable(feature = "rust1", since = "1.0.0")]);
-    };
-    (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
-        #[$attr]
-        impl<'a> $imp<$u> for &'a $t {
+    (impl $imp:ident, $method:ident for $t:ty, $u:ty, $(#[$attr:meta])+) => {
+        $(#[$attr])+
+        impl const $imp<$u> for &$t {
             type Output = <$t as $imp<$u>>::Output;
 
             #[inline]
@@ -37,8 +29,8 @@ macro_rules! forward_ref_binop {
             }
         }
 
-        #[$attr]
-        impl $imp<&$u> for $t {
+        $(#[$attr])+
+        impl const $imp<&$u> for $t {
             type Output = <$t as $imp<$u>>::Output;
 
             #[inline]
@@ -48,8 +40,8 @@ macro_rules! forward_ref_binop {
             }
         }
 
-        #[$attr]
-        impl $imp<&$u> for &$t {
+        $(#[$attr])+
+        impl const $imp<&$u> for &$t {
             type Output = <$t as $imp<$u>>::Output;
 
             #[inline]
@@ -64,13 +56,9 @@ macro_rules! forward_ref_binop {
 // implements "T op= &U", based on "T op= U"
 // where U is expected to be `Copy`able
 macro_rules! forward_ref_op_assign {
-    (impl $imp:ident, $method:ident for $t:ty, $u:ty) => {
-        forward_ref_op_assign!(impl $imp, $method for $t, $u,
-                #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]);
-    };
-    (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
-        #[$attr]
-        impl $imp<&$u> for $t {
+    (impl $imp:ident, $method:ident for $t:ty, $u:ty, $(#[$attr:meta])+) => {
+        $(#[$attr])+
+        impl const $imp<&$u> for $t {
             #[inline]
             #[track_caller]
             fn $method(&mut self, other: &$u) {
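The forwarding macros now take the stability attribute(s) explicitly at every call site and emit `impl const` blocks, so the by-reference operator impls track the constness of the by-value ones. A reduced, runnable sketch of the attribute-forwarding half of that change (the `const` part needs the nightly const-trait machinery and is omitted; all names here are illustrative):

    // Same calling convention as the updated macros: one or more `#[...]`
    // metas are required and get spliced onto the generated impl.
    macro_rules! forward_ref_unop_demo {
        (impl $imp:ident, $method:ident for $t:ty, $(#[$attr:meta])+) => {
            $(#[$attr])+
            impl std::ops::$imp for &$t {
                type Output = <$t as std::ops::$imp>::Output;
                #[inline]
                fn $method(self) -> Self::Output {
                    std::ops::$imp::$method(*self)
                }
            }
        };
    }

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Meters(i32);

    impl std::ops::Neg for Meters {
        type Output = Meters;
        fn neg(self) -> Meters {
            Meters(-self.0)
        }
    }

    forward_ref_unop_demo! { impl Neg, neg for Meters, #[allow(dead_code)] }

    fn main() {
        assert_eq!(-&Meters(3), Meters(-3));
    }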
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index 106cc725fee..7228ad0ed6d 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -150,69 +150,63 @@ pub unsafe fn atomic_xchg<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src:
 
 /// Adds to the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_add` method. For example, [`AtomicIsize::fetch_add`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_xadd<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_xadd<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Subtract from the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_sub` method. For example, [`AtomicIsize::fetch_sub`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_xsub<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_xsub<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise and with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_and` method. For example, [`AtomicBool::fetch_and`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_and<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_and<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise nand with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`AtomicBool`] type via the `fetch_nand` method. For example, [`AtomicBool::fetch_nand`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_nand<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_nand<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise or with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_or` method. For example, [`AtomicBool::fetch_or`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_or<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_or<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise xor with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_xor` method. For example, [`AtomicBool::fetch_xor`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_xor<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_xor<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Maximum with the current value using a signed comparison.
 /// `T` must be a signed integer type.
@@ -1828,7 +1822,7 @@ pub const fn three_way_compare<T: Copy>(lhs: T, rhss: T) -> crate::cmp::Ordering
 #[rustc_intrinsic]
 #[track_caller]
 #[miri::intrinsic_fallback_is_spec] // the fallbacks all `assume` to tell Miri
-pub const unsafe fn disjoint_bitor<T: ~const fallback::DisjointBitOr>(a: T, b: T) -> T {
+pub const unsafe fn disjoint_bitor<T: [const] fallback::DisjointBitOr>(a: T, b: T) -> T {
     // SAFETY: same preconditions as this function.
     unsafe { fallback::DisjointBitOr::disjoint_bitor(a, b) }
 }
@@ -1897,7 +1891,7 @@ pub const fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
 #[rustc_nounwind]
 #[rustc_intrinsic]
 #[miri::intrinsic_fallback_is_spec]
-pub const fn carrying_mul_add<T: ~const fallback::CarryingMulAdd<Unsigned = U>, U>(
+pub const fn carrying_mul_add<T: [const] fallback::CarryingMulAdd<Unsigned = U>, U>(
     multiplier: T,
     multiplicand: T,
     addend: T,
@@ -2667,7 +2661,7 @@ pub unsafe fn vtable_align(ptr: *const ()) -> usize;
 /// More specifically, this is the offset in bytes between successive
 /// items of the same type, including alignment padding.
 ///
-/// The stabilized version of this intrinsic is [`size_of`].
+/// The stabilized version of this intrinsic is [`core::mem::size_of`].
 #[rustc_nounwind]
 #[unstable(feature = "core_intrinsics", issue = "none")]
 #[rustc_intrinsic_const_stable_indirect]
@@ -2681,7 +2675,7 @@ pub const fn size_of<T>() -> usize;
 /// Therefore, implementations must not require the user to uphold
 /// any safety invariants.
 ///
-/// The stabilized version of this intrinsic is [`align_of`].
+/// The stabilized version of this intrinsic is [`core::mem::align_of`].
 #[rustc_nounwind]
 #[unstable(feature = "core_intrinsics", issue = "none")]
 #[rustc_intrinsic_const_stable_indirect]
@@ -2704,7 +2698,7 @@ pub const fn variant_count<T>() -> usize;
 
 /// The size of the referenced value in bytes.
 ///
-/// The stabilized version of this intrinsic is [`size_of_val`].
+/// The stabilized version of this intrinsic is [`core::mem::size_of_val`].
 ///
 /// # Safety
 ///
@@ -2717,7 +2711,7 @@ pub const unsafe fn size_of_val<T: ?Sized>(ptr: *const T) -> usize;
 
 /// The required alignment of the referenced value.
 ///
-/// The stabilized version of this intrinsic is [`align_of_val`].
+/// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
 ///
 /// # Safety
 ///
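The doc comments above keep pointing at the stabilized surface for these intrinsics; the extra generic parameter (`src: U`) is an internal detail. A minimal illustration of that stable surface:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // fetch_add is the stabilized counterpart of `atomic_xadd`: it returns the
    // previous value and stores the sum.
    let counter = AtomicUsize::new(5);
    let previous = counter.fetch_add(3, Ordering::SeqCst);
    assert_eq!(previous, 5);
    assert_eq!(counter.load(Ordering::SeqCst), 8);
}
```
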
diff --git a/library/core/src/iter/adapters/map_windows.rs b/library/core/src/iter/adapters/map_windows.rs
index a9c07fee2a9..0dada9eb6aa 100644
--- a/library/core/src/iter/adapters/map_windows.rs
+++ b/library/core/src/iter/adapters/map_windows.rs
@@ -195,7 +195,7 @@ impl<T, const N: usize> Buffer<T, N> {
 
         // SAFETY: the index is valid and this is element `a` in the
         // diagram above and has not been dropped yet.
-        unsafe { ptr::drop_in_place(to_drop.cast::<T>()) };
+        unsafe { ptr::drop_in_place(to_drop.cast_init()) };
     }
 }
 
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 29313867ff2..7fb162a653f 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -1875,7 +1875,7 @@ pub trait Iterator {
     /// without giving up ownership of the original iterator,
     /// so you can use the original iterator afterwards.
     ///
-    /// Uses [impl<I: Iterator + ?Sized> Iterator for &mut I { type Item = I::Item; ...}](https://doc.rust-lang.org/nightly/std/iter/trait.Iterator.html#impl-Iterator-for-%26mut+I).
+    /// Uses [`impl<I: Iterator + ?Sized> Iterator for &mut I { type Item = I::Item; ...}`](https://doc.rust-lang.org/nightly/std/iter/trait.Iterator.html#impl-Iterator-for-%26mut+I).
     ///
     /// # Examples
     ///
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 3c33f4b1368..d5bce6ad233 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -195,6 +195,7 @@
 #![feature(hexagon_target_feature)]
 #![feature(loongarch_target_feature)]
 #![feature(mips_target_feature)]
+#![feature(nvptx_target_feature)]
 #![feature(powerpc_target_feature)]
 #![feature(riscv_target_feature)]
 #![feature(rtm_target_feature)]
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index 6adeb2aa3fd..87f2110034c 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -2,7 +2,6 @@ use super::display_buffer::DisplayBuffer;
 use crate::cmp::Ordering;
 use crate::fmt::{self, Write};
 use crate::hash::{Hash, Hasher};
-use crate::iter;
 use crate::mem::transmute;
 use crate::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
 
@@ -2348,20 +2347,24 @@ impl const From<[u16; 8]> for IpAddr {
 }
 
 #[stable(feature = "ip_bitops", since = "1.75.0")]
-impl Not for Ipv4Addr {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Not for Ipv4Addr {
     type Output = Ipv4Addr;
 
     #[inline]
     fn not(mut self) -> Ipv4Addr {
-        for octet in &mut self.octets {
-            *octet = !*octet;
+        let mut idx = 0;
+        while idx < 4 {
+            self.octets[idx] = !self.octets[idx];
+            idx += 1;
         }
         self
     }
 }
 
 #[stable(feature = "ip_bitops", since = "1.75.0")]
-impl Not for &'_ Ipv4Addr {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Not for &'_ Ipv4Addr {
     type Output = Ipv4Addr;
 
     #[inline]
@@ -2371,20 +2374,24 @@ impl Not for &'_ Ipv4Addr {
 }
 
 #[stable(feature = "ip_bitops", since = "1.75.0")]
-impl Not for Ipv6Addr {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Not for Ipv6Addr {
     type Output = Ipv6Addr;
 
     #[inline]
     fn not(mut self) -> Ipv6Addr {
-        for octet in &mut self.octets {
-            *octet = !*octet;
+        let mut idx = 0;
+        while idx < 16 {
+            self.octets[idx] = !self.octets[idx];
+            idx += 1;
         }
         self
     }
 }
 
 #[stable(feature = "ip_bitops", since = "1.75.0")]
-impl Not for &'_ Ipv6Addr {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Not for &'_ Ipv6Addr {
     type Output = Ipv6Addr;
 
     #[inline]
@@ -2400,23 +2407,25 @@ macro_rules! bitop_impls {
     )*) => {
         $(
             $(#[$attr])*
-            impl $BitOpAssign for $ty {
+            impl const $BitOpAssign for $ty {
                 fn $bitop_assign(&mut self, rhs: $ty) {
-                    for (lhs, rhs) in iter::zip(&mut self.octets, rhs.octets) {
-                        lhs.$bitop_assign(rhs);
+                    let mut idx = 0;
+                    while idx < self.octets.len() {
+                        self.octets[idx].$bitop_assign(rhs.octets[idx]);
+                        idx += 1;
                     }
                 }
             }
 
             $(#[$attr])*
-            impl $BitOpAssign<&'_ $ty> for $ty {
+            impl const $BitOpAssign<&'_ $ty> for $ty {
                 fn $bitop_assign(&mut self, rhs: &'_ $ty) {
                     self.$bitop_assign(*rhs);
                 }
             }
 
             $(#[$attr])*
-            impl $BitOp for $ty {
+            impl const $BitOp for $ty {
                 type Output = $ty;
 
                 #[inline]
@@ -2427,7 +2436,7 @@ macro_rules! bitop_impls {
             }
 
             $(#[$attr])*
-            impl $BitOp<&'_ $ty> for $ty {
+            impl const $BitOp<&'_ $ty> for $ty {
                 type Output = $ty;
 
                 #[inline]
@@ -2438,7 +2447,7 @@ macro_rules! bitop_impls {
             }
 
             $(#[$attr])*
-            impl $BitOp<$ty> for &'_ $ty {
+            impl const $BitOp<$ty> for &'_ $ty {
                 type Output = $ty;
 
                 #[inline]
@@ -2450,7 +2459,7 @@ macro_rules! bitop_impls {
             }
 
             $(#[$attr])*
-            impl $BitOp<&'_ $ty> for &'_ $ty {
+            impl const $BitOp<&'_ $ty> for &'_ $ty {
                 type Output = $ty;
 
                 #[inline]
@@ -2466,12 +2475,16 @@ macro_rules! bitop_impls {
 
 bitop_impls! {
     #[stable(feature = "ip_bitops", since = "1.75.0")]
+    #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
     impl (BitAnd, BitAndAssign) for Ipv4Addr = (bitand, bitand_assign);
     #[stable(feature = "ip_bitops", since = "1.75.0")]
+    #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
     impl (BitOr, BitOrAssign) for Ipv4Addr = (bitor, bitor_assign);
 
     #[stable(feature = "ip_bitops", since = "1.75.0")]
+    #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
     impl (BitAnd, BitAndAssign) for Ipv6Addr = (bitand, bitand_assign);
     #[stable(feature = "ip_bitops", since = "1.75.0")]
+    #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
     impl (BitOr, BitOrAssign) for Ipv6Addr = (bitor, bitor_assign);
 }
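These operator impls have been stable since 1.75; the change above only rewrites their bodies into const-compatible loops and marks them `impl const` behind the unstable `const_ops` feature. At runtime they behave exactly as before:

```rust
use std::net::Ipv4Addr;

fn main() {
    let addr = Ipv4Addr::new(192, 168, 1, 10);
    let mask = Ipv4Addr::new(255, 255, 255, 0);
    // Per-octet bitwise operations, e.g. for extracting the network part.
    assert_eq!(addr & mask, Ipv4Addr::new(192, 168, 1, 0));
    assert_eq!(!mask, Ipv4Addr::new(0, 0, 0, 255));
}
```
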
diff --git a/library/core/src/net/socket_addr.rs b/library/core/src/net/socket_addr.rs
index 69924199f99..df99e9b20c2 100644
--- a/library/core/src/net/socket_addr.rs
+++ b/library/core/src/net/socket_addr.rs
@@ -613,7 +613,7 @@ impl const From<SocketAddrV6> for SocketAddr {
 
 #[stable(feature = "addr_from_into_ip", since = "1.17.0")]
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
-impl<I: ~const Into<IpAddr>> const From<(I, u16)> for SocketAddr {
+impl<I: [const] Into<IpAddr>> const From<(I, u16)> for SocketAddr {
     /// Converts a tuple struct (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`].
     ///
     /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`]
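The impl whose bound syntax changes here is the long-stable tuple conversion; for reference:

```rust
use std::net::{Ipv4Addr, SocketAddr};

fn main() {
    // (impl Into<IpAddr>, u16) -> SocketAddr, picking V4 or V6 to match the address.
    let addr = SocketAddr::from((Ipv4Addr::LOCALHOST, 8080));
    assert_eq!(addr.to_string(), "127.0.0.1:8080");
}
```
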
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 5683d5ec92d..bd2f7445612 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -177,14 +177,14 @@ macro_rules! int_impl {
         ///
         #[doc = concat!("let n: ", stringify!($SelfT), " = 0b_01100100;")]
         ///
-        /// assert_eq!(n.isolate_most_significant_one(), 0b_01000000);
-        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_most_significant_one(), 0);")]
+        /// assert_eq!(n.isolate_highest_one(), 0b_01000000);
+        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_highest_one(), 0);")]
         /// ```
         #[unstable(feature = "isolate_most_least_significant_one", issue = "136909")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
-        pub const fn isolate_most_significant_one(self) -> Self {
+        pub const fn isolate_highest_one(self) -> Self {
             self & (((1 as $SelfT) << (<$SelfT>::BITS - 1)).wrapping_shr(self.leading_zeros()))
         }
 
@@ -198,14 +198,14 @@ macro_rules! int_impl {
         ///
         #[doc = concat!("let n: ", stringify!($SelfT), " = 0b_01100100;")]
         ///
-        /// assert_eq!(n.isolate_least_significant_one(), 0b_00000100);
-        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_least_significant_one(), 0);")]
+        /// assert_eq!(n.isolate_lowest_one(), 0b_00000100);
+        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_lowest_one(), 0);")]
         /// ```
         #[unstable(feature = "isolate_most_least_significant_one", issue = "136909")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
-        pub const fn isolate_least_significant_one(self) -> Self {
+        pub const fn isolate_lowest_one(self) -> Self {
             self & self.wrapping_neg()
         }
 
@@ -469,17 +469,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).strict_add(1), ", stringify!($SelfT), "::MAX - 1);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MAX - 2).strict_add(3);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -560,17 +559,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".strict_add_unsigned(2), 3);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MAX - 2).strict_add_unsigned(3);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -611,17 +609,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).strict_sub(1), ", stringify!($SelfT), "::MIN + 1);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MIN + 2).strict_sub(3);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -702,17 +699,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".strict_sub_unsigned(2), -1);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MIN + 2).strict_sub_unsigned(3);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -753,17 +749,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.strict_mul(1), ", stringify!($SelfT), "::MAX);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ``` should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MAX.strict_mul(2);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -855,24 +850,22 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).strict_div(-1), ", stringify!($Max), ");")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.strict_div(-1);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (1", stringify!($SelfT), ").strict_div(0);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -924,24 +917,22 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).strict_div_euclid(-1), ", stringify!($Max), ");")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.strict_div_euclid(-1);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (1", stringify!($SelfT), ").strict_div_euclid(0);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1092,24 +1083,22 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".strict_rem(2), 1);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 5", stringify!($SelfT), ".strict_rem(0);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.strict_rem(-1);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1160,24 +1149,22 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".strict_rem_euclid(2), 1);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 5", stringify!($SelfT), ".strict_rem_euclid(0);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.strict_rem_euclid(-1);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1249,17 +1236,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".strict_neg(), -5);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.strict_neg();")]
         ///
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1306,17 +1292,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".strict_shl(4), 0x10);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 0x1", stringify!($SelfT), ".strict_shl(129);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1422,17 +1407,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".strict_shr(4), 0x1);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 0x10", stringify!($SelfT), ".strict_shr(128);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1542,17 +1526,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!((-5", stringify!($SelfT), ").strict_abs(), 5);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.strict_abs();")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1612,17 +1595,16 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(8", stringify!($SelfT), ".strict_pow(2), 64);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MAX.strict_pow(2);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
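With the gates removed from the doctests and the methods marked stable (the `CURRENT_RUSTC_VERSION` placeholders are filled in at release time), the strict operations panic on overflow instead of wrapping. A small sketch; on toolchains where this stabilization has not landed yet it still needs the nightly feature:

```rust
#![feature(strict_overflow_ops)] // drop once strict_overflow_ops is stable

fn main() {
    assert_eq!(100i8.strict_add(27), 127);
    assert_eq!(100i8.wrapping_add(28), -128);
    // 100i8.strict_add(28) would panic with an overflow message instead.
}
```
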
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index f793602de50..308d722f5d5 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -203,7 +203,7 @@ impl<T> Copy for NonZero<T> where T: ZeroablePrimitive {}
 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
 impl<T> const PartialEq for NonZero<T>
 where
-    T: ZeroablePrimitive + ~const PartialEq,
+    T: ZeroablePrimitive + [const] PartialEq,
 {
     #[inline]
     fn eq(&self, other: &Self) -> bool {
@@ -310,9 +310,10 @@ where
 }
 
 #[stable(feature = "nonzero_bitor", since = "1.45.0")]
-impl<T> BitOr for NonZero<T>
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl<T> const BitOr for NonZero<T>
 where
-    T: ZeroablePrimitive + BitOr<Output = T>,
+    T: ZeroablePrimitive + [const] BitOr<Output = T>,
 {
     type Output = Self;
 
@@ -324,9 +325,10 @@ where
 }
 
 #[stable(feature = "nonzero_bitor", since = "1.45.0")]
-impl<T> BitOr<T> for NonZero<T>
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl<T> const BitOr<T> for NonZero<T>
 where
-    T: ZeroablePrimitive + BitOr<Output = T>,
+    T: ZeroablePrimitive + [const] BitOr<Output = T>,
 {
     type Output = Self;
 
@@ -338,9 +340,10 @@ where
 }
 
 #[stable(feature = "nonzero_bitor", since = "1.45.0")]
-impl<T> BitOr<NonZero<T>> for T
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl<T> const BitOr<NonZero<T>> for T
 where
-    T: ZeroablePrimitive + BitOr<Output = T>,
+    T: ZeroablePrimitive + [const] BitOr<Output = T>,
 {
     type Output = NonZero<T>;
 
@@ -352,10 +355,11 @@ where
 }
 
 #[stable(feature = "nonzero_bitor", since = "1.45.0")]
-impl<T> BitOrAssign for NonZero<T>
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl<T> const BitOrAssign for NonZero<T>
 where
     T: ZeroablePrimitive,
-    Self: BitOr<Output = Self>,
+    Self: [const] BitOr<Output = Self>,
 {
     #[inline]
     fn bitor_assign(&mut self, rhs: Self) {
@@ -364,10 +368,11 @@ where
 }
 
 #[stable(feature = "nonzero_bitor", since = "1.45.0")]
-impl<T> BitOrAssign<T> for NonZero<T>
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl<T> const BitOrAssign<T> for NonZero<T>
 where
     T: ZeroablePrimitive,
-    Self: BitOr<T, Output = Self>,
+    Self: [const] BitOr<T, Output = Self>,
 {
     #[inline]
     fn bitor_assign(&mut self, rhs: T) {
@@ -629,7 +634,7 @@ macro_rules! nonzero_integer {
             #[doc = concat!("let a = NonZero::<", stringify!($Int), ">::new(0b_01100100)?;")]
             #[doc = concat!("let b = NonZero::<", stringify!($Int), ">::new(0b_01000000)?;")]
             ///
-            /// assert_eq!(a.isolate_most_significant_one(), b);
+            /// assert_eq!(a.isolate_highest_one(), b);
             /// # Some(())
             /// # }
             /// ```
@@ -637,7 +642,7 @@ macro_rules! nonzero_integer {
             #[must_use = "this returns the result of the operation, \
                         without modifying the original"]
             #[inline(always)]
-            pub const fn isolate_most_significant_one(self) -> Self {
+            pub const fn isolate_highest_one(self) -> Self {
                 let n = self.get() & (((1 as $Int) << (<$Int>::BITS - 1)).wrapping_shr(self.leading_zeros()));
 
                 // SAFETY:
@@ -659,7 +664,7 @@ macro_rules! nonzero_integer {
             #[doc = concat!("let a = NonZero::<", stringify!($Int), ">::new(0b_01100100)?;")]
             #[doc = concat!("let b = NonZero::<", stringify!($Int), ">::new(0b_00000100)?;")]
             ///
-            /// assert_eq!(a.isolate_least_significant_one(), b);
+            /// assert_eq!(a.isolate_lowest_one(), b);
             /// # Some(())
             /// # }
             /// ```
@@ -667,7 +672,7 @@ macro_rules! nonzero_integer {
             #[must_use = "this returns the result of the operation, \
                         without modifying the original"]
             #[inline(always)]
-            pub const fn isolate_least_significant_one(self) -> Self {
+            pub const fn isolate_lowest_one(self) -> Self {
                 let n = self.get();
                 let n = n & n.wrapping_neg();
 
@@ -1239,7 +1244,8 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
     // Impls for unsigned nonzero types only.
     (unsigned $Int:ty) => {
         #[stable(feature = "nonzero_div", since = "1.51.0")]
-        impl Div<NonZero<$Int>> for $Int {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Div<NonZero<$Int>> for $Int {
             type Output = $Int;
 
             /// Same as `self / other.get()`, but because `other` is a `NonZero<_>`,
@@ -1257,7 +1263,8 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
         }
 
         #[stable(feature = "nonzero_div_assign", since = "1.79.0")]
-        impl DivAssign<NonZero<$Int>> for $Int {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const DivAssign<NonZero<$Int>> for $Int {
             /// Same as `self /= other.get()`, but because `other` is a `NonZero<_>`,
             /// there's never a runtime check for division-by-zero.
             ///
@@ -1270,7 +1277,8 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
         }
 
         #[stable(feature = "nonzero_div", since = "1.51.0")]
-        impl Rem<NonZero<$Int>> for $Int {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Rem<NonZero<$Int>> for $Int {
             type Output = $Int;
 
             /// This operation satisfies `n % d == n - (n / d) * d`, and cannot panic.
@@ -1283,7 +1291,8 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
         }
 
         #[stable(feature = "nonzero_div_assign", since = "1.79.0")]
-        impl RemAssign<NonZero<$Int>> for $Int {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const RemAssign<NonZero<$Int>> for $Int {
             /// This operation satisfies `n % d == n - (n / d) * d`, and cannot panic.
             #[inline]
             fn rem_assign(&mut self, other: NonZero<$Int>) {
@@ -1323,7 +1332,8 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
     // Impls for signed nonzero types only.
     (signed $Int:ty) => {
         #[stable(feature = "signed_nonzero_neg", since = "1.71.0")]
-        impl Neg for NonZero<$Int> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Neg for NonZero<$Int> {
             type Output = Self;
 
             #[inline]
@@ -1334,7 +1344,8 @@ macro_rules! nonzero_integer_signedness_dependent_impls {
         }
 
         forward_ref_unop! { impl Neg, neg for NonZero<$Int>,
-        #[stable(feature = "signed_nonzero_neg", since = "1.71.0")] }
+        #[stable(feature = "signed_nonzero_neg", since = "1.71.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
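The `Div`/`Rem` impls made `const` here have been stable at runtime since 1.51 and skip the division-by-zero check entirely, since the type system already rules out a zero divisor:

```rust
use std::num::NonZero;

fn main() {
    let divisor = NonZero::new(4u32).unwrap();
    // No runtime zero check: NonZero guarantees divisor != 0.
    assert_eq!(13u32 / divisor, 3);
    assert_eq!(13u32 % divisor, 1);
}
```
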
 
diff --git a/library/core/src/num/saturating.rs b/library/core/src/num/saturating.rs
index 4460e430aec..c7040721b93 100644
--- a/library/core/src/num/saturating.rs
+++ b/library/core/src/num/saturating.rs
@@ -109,7 +109,8 @@ impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
 //         //         *self = *self << other;
 //         //     }
 //         // }
-//         // forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
+//         // forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f,
+//         // #[unstable(feature = "saturating_int_impl", issue = "87920")] }
 //
 //         #[unstable(feature = "saturating_int_impl", issue = "87920")]
 //         impl Shr<$f> for Saturating<$t> {
@@ -134,7 +135,8 @@ impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
 //                 *self = *self >> other;
 //             }
 //         }
-//         forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
+//         forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f,
+//         #[unstable(feature = "saturating_int_impl", issue = "87920")] }
 //     };
 // }
 //
@@ -159,7 +161,8 @@ impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
 //                 *self = *self << other;
 //             }
 //         }
-//         forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f }
+//         forward_ref_op_assign! { impl ShlAssign, shl_assign for Saturating<$t>, $f,
+//         #[unstable(feature = "saturating_int_impl", issue = "87920")] }
 //
 //         #[unstable(feature = "saturating_int_impl", issue = "87920")]
 //         impl Shr<$f> for Saturating<$t> {
@@ -180,7 +183,8 @@ impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
 //                 *self = *self >> other;
 //             }
 //         }
-//         forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f }
+//         forward_ref_op_assign! { impl ShrAssign, shr_assign for Saturating<$t>, $f,
+//         #[unstable(feature = "saturating_int_impl", issue = "87920")] }
 //     };
 // }
 //
@@ -209,7 +213,8 @@ impl<T: fmt::UpperHex> fmt::UpperHex for Saturating<T> {
 macro_rules! saturating_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Add for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Add for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -218,28 +223,36 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl Add, add for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl AddAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const AddAssign for Saturating<$t> {
             #[inline]
             fn add_assign(&mut self, other: Saturating<$t>) {
                 *self = *self + other;
             }
         }
-        forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl AddAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const AddAssign<$t> for Saturating<$t> {
             #[inline]
             fn add_assign(&mut self, other: $t) {
                 *self = *self + Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl AddAssign, add_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Sub for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Sub for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -248,28 +261,36 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl Sub, sub for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl SubAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const SubAssign for Saturating<$t> {
             #[inline]
             fn sub_assign(&mut self, other: Saturating<$t>) {
                 *self = *self - other;
             }
         }
-        forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl SubAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const SubAssign<$t> for Saturating<$t> {
             #[inline]
             fn sub_assign(&mut self, other: $t) {
                 *self = *self - Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl SubAssign, sub_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Mul for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Mul for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -278,25 +299,32 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl Mul, mul for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl MulAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const MulAssign for Saturating<$t> {
             #[inline]
             fn mul_assign(&mut self, other: Saturating<$t>) {
                 *self = *self * other;
             }
         }
-        forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl MulAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const MulAssign<$t> for Saturating<$t> {
             #[inline]
             fn mul_assign(&mut self, other: $t) {
                 *self = *self * Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl MulAssign, mul_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         /// # Examples
         ///
@@ -314,7 +342,8 @@ macro_rules! saturating_impl {
         #[doc = concat!("let _ = Saturating(0", stringify!($t), ") / Saturating(0);")]
         /// ```
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Div for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Div for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -323,29 +352,36 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl Div, div for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
-
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl DivAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const DivAssign for Saturating<$t> {
             #[inline]
             fn div_assign(&mut self, other: Saturating<$t>) {
                 *self = *self / other;
             }
         }
-        forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl DivAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const DivAssign<$t> for Saturating<$t> {
             #[inline]
             fn div_assign(&mut self, other: $t) {
                 *self = *self / Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl DivAssign, div_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Rem for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Rem for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -354,28 +390,36 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl Rem, rem for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl RemAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const RemAssign for Saturating<$t> {
             #[inline]
             fn rem_assign(&mut self, other: Saturating<$t>) {
                 *self = *self % other;
             }
         }
-        forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl RemAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const RemAssign<$t> for Saturating<$t> {
             #[inline]
             fn rem_assign(&mut self, other: $t) {
                 *self = *self % Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl RemAssign, rem_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Not for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Not for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -384,10 +428,12 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_unop! { impl Not, not for Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl BitXor for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXor for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -396,28 +442,36 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl BitXor, bitxor for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl BitXorAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXorAssign for Saturating<$t> {
             #[inline]
             fn bitxor_assign(&mut self, other: Saturating<$t>) {
                 *self = *self ^ other;
             }
         }
-        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl BitXorAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXorAssign<$t> for Saturating<$t> {
             #[inline]
             fn bitxor_assign(&mut self, other: $t) {
                 *self = *self ^ Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl BitOr for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOr for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -426,28 +480,36 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl BitOr, bitor for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl BitOrAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOrAssign for Saturating<$t> {
             #[inline]
             fn bitor_assign(&mut self, other: Saturating<$t>) {
                 *self = *self | other;
             }
         }
-        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl BitOrAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOrAssign<$t> for Saturating<$t> {
             #[inline]
             fn bitor_assign(&mut self, other: $t) {
                 *self = *self | Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl BitAnd for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAnd for Saturating<$t> {
             type Output = Saturating<$t>;
 
             #[inline]
@@ -456,25 +518,32 @@ macro_rules! saturating_impl {
             }
         }
         forward_ref_binop! { impl BitAnd, bitand for Saturating<$t>, Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl BitAndAssign for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAndAssign for Saturating<$t> {
             #[inline]
             fn bitand_assign(&mut self, other: Saturating<$t>) {
                 *self = *self & other;
             }
         }
-        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, Saturating<$t> }
+        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, Saturating<$t>,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "saturating_int_assign_impl", since = "1.74.0")]
-        impl BitAndAssign<$t> for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAndAssign<$t> for Saturating<$t> {
             #[inline]
             fn bitand_assign(&mut self, other: $t) {
                 *self = *self & Saturating(other);
             }
         }
-        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, $t }
+        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Saturating<$t>, $t,
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
     )*)
 }
@@ -931,7 +1000,8 @@ macro_rules! saturating_int_impl_signed {
         }
 
         #[stable(feature = "saturating_int_impl", since = "1.74.0")]
-        impl Neg for Saturating<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Neg for Saturating<$t> {
             type Output = Self;
             #[inline]
             fn neg(self) -> Self {
@@ -939,7 +1009,8 @@ macro_rules! saturating_int_impl_signed {
             }
         }
         forward_ref_unop! { impl Neg, neg for Saturating<$t>,
-                #[stable(feature = "saturating_int_impl", since = "1.74.0")] }
+        #[stable(feature = "saturating_int_impl", since = "1.74.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
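The bitwise and negation operators on `Saturating<T>` gain `impl const` forms behind the `const_ops` gate (tracking issue 143802), while runtime behaviour stays exactly as on stable. A minimal sketch of what this enables, assuming a nightly toolchain where that gate (and, depending on the build, `const_trait_impl`) is available:

```rust
// Assumption: nightly with the `const_ops` gate introduced above; some
// toolchains may additionally require `const_trait_impl`.
#![feature(const_ops)]

use std::num::Saturating;

// `&` and `|` on Saturating<T> are plain bitwise ops on the inner value,
// but previously could not be called from a const fn.
const fn mask(x: Saturating<u8>) -> Saturating<u8> {
    (x & Saturating(0b1111_0000)) | Saturating(0b0000_0101)
}

const MASKED: Saturating<u8> = mask(Saturating(0b1010_1010));

fn main() {
    assert_eq!(MASKED, Saturating(0b1010_0101));
    // Saturating arithmetic itself is unchanged: it still clamps at the bounds.
    assert_eq!(Saturating(250u8) + Saturating(10), Saturating(255));
}
```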
 
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 584cd60fbe5..2c9a1196849 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -229,14 +229,14 @@ macro_rules! uint_impl {
         ///
         #[doc = concat!("let n: ", stringify!($SelfT), " = 0b_01100100;")]
         ///
-        /// assert_eq!(n.isolate_most_significant_one(), 0b_01000000);
-        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_most_significant_one(), 0);")]
+        /// assert_eq!(n.isolate_highest_one(), 0b_01000000);
+        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_highest_one(), 0);")]
         /// ```
         #[unstable(feature = "isolate_most_least_significant_one", issue = "136909")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
-        pub const fn isolate_most_significant_one(self) -> Self {
+        pub const fn isolate_highest_one(self) -> Self {
             self & (((1 as $SelfT) << (<$SelfT>::BITS - 1)).wrapping_shr(self.leading_zeros()))
         }
 
@@ -250,14 +250,14 @@ macro_rules! uint_impl {
         ///
         #[doc = concat!("let n: ", stringify!($SelfT), " = 0b_01100100;")]
         ///
-        /// assert_eq!(n.isolate_least_significant_one(), 0b_00000100);
-        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_least_significant_one(), 0);")]
+        /// assert_eq!(n.isolate_lowest_one(), 0b_00000100);
+        #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".isolate_lowest_one(), 0);")]
         /// ```
         #[unstable(feature = "isolate_most_least_significant_one", issue = "136909")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
-        pub const fn isolate_least_significant_one(self) -> Self {
+        pub const fn isolate_lowest_one(self) -> Self {
             self & self.wrapping_neg()
         }
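These two hunks only rename the bit-isolation helpers (`isolate_most_significant_one` → `isolate_highest_one`, `isolate_least_significant_one` → `isolate_lowest_one`); both remain unstable behind `isolate_most_least_significant_one` (issue 136909). A nightly-only sketch of the renamed API, using the same values as the doc examples:

```rust
// Still unstable: requires nightly and this feature gate.
#![feature(isolate_most_least_significant_one)]

fn main() {
    let n: u8 = 0b_01100100;
    // Keep only the highest set bit.
    assert_eq!(n.isolate_highest_one(), 0b_01000000);
    // Keep only the lowest set bit (equivalent to n & n.wrapping_neg()).
    assert_eq!(n.isolate_lowest_one(), 0b_00000100);
    // Zero has no set bits, so both return zero.
    assert_eq!(0u8.isolate_highest_one(), 0);
    assert_eq!(0u8.isolate_lowest_one(), 0);
}
```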
 
@@ -538,17 +538,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).strict_add(1), ", stringify!($SelfT), "::MAX - 1);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MAX - 2).strict_add(3);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -630,22 +629,20 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".strict_add_signed(2), 3);")]
         /// ```
         ///
         /// The following panic because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 1", stringify!($SelfT), ".strict_add_signed(-2);")]
         /// ```
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MAX - 2).strict_add_signed(3);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -695,17 +692,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".strict_sub(1), 0);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 0", stringify!($SelfT), ".strict_sub(1);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -790,8 +786,8 @@ macro_rules! uint_impl {
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_sub_signed(-2), Some(3));")]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_sub_signed(-4), None);")]
         /// ```
-        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
-        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
+        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
+        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -817,22 +813,20 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".strict_sub_signed(2), 1);")]
         /// ```
         ///
         /// The following panic because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 1", stringify!($SelfT), ".strict_sub_signed(2);")]
         /// ```
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (", stringify!($SelfT), "::MAX).strict_sub_signed(-1);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -850,7 +844,6 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(unsigned_signed_diff)]
         #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_signed_diff(2), Some(8));")]
         #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_signed_diff(10), Some(-8));")]
         #[doc = concat!(
@@ -888,7 +881,8 @@ macro_rules! uint_impl {
             "::MAX), Some(0));"
         )]
         /// ```
-        #[unstable(feature = "unsigned_signed_diff", issue = "126041")]
+        #[stable(feature = "unsigned_signed_diff", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "unsigned_signed_diff", since = "CURRENT_RUSTC_VERSION")]
         #[inline]
         pub const fn checked_signed_diff(self, rhs: Self) -> Option<$SignedT> {
             let res = self.wrapping_sub(rhs) as $SignedT;
@@ -932,17 +926,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".strict_mul(1), 5);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ``` should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MAX.strict_mul(2);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1029,17 +1022,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".strict_div(10), 10);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (1", stringify!($SelfT), ").strict_div(0);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
@@ -1085,16 +1077,15 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".strict_div_euclid(10), 10);")]
         /// ```
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = (1", stringify!($SelfT), ").strict_div_euclid(0);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
@@ -1239,17 +1230,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".strict_rem(10), 0);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 5", stringify!($SelfT), ".strict_rem(0);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
@@ -1296,17 +1286,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".strict_rem_euclid(10), 0);")]
         /// ```
         ///
         /// The following panics because of division by zero:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 5", stringify!($SelfT), ".strict_rem_euclid(0);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
@@ -1568,17 +1557,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".strict_neg(), 0);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 1", stringify!($SelfT), ".strict_neg();")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1625,17 +1613,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(0x1", stringify!($SelfT), ".strict_shl(4), 0x10);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 0x10", stringify!($SelfT), ".strict_shl(129);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1741,17 +1728,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(0x10", stringify!($SelfT), ".strict_shr(4), 0x1);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = 0x10", stringify!($SelfT), ".strict_shr(129);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1867,17 +1853,16 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".strict_pow(5), 32);")]
         /// ```
         ///
         /// The following panics because of overflow:
         ///
         /// ```should_panic
-        /// #![feature(strict_overflow_ops)]
         #[doc = concat!("let _ = ", stringify!($SelfT), "::MAX.strict_pow(2);")]
         /// ```
-        #[unstable(feature = "strict_overflow_ops", issue = "118260")]
+        #[stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
+        #[rustc_const_stable(feature = "strict_overflow_ops", since = "CURRENT_RUSTC_VERSION")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -1974,8 +1959,8 @@ macro_rules! uint_impl {
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_sub_signed(-2), 3);")]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).saturating_sub_signed(-4), ", stringify!($SelfT), "::MAX);")]
         /// ```
-        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
-        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
+        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
+        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -2122,8 +2107,8 @@ macro_rules! uint_impl {
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_sub_signed(-2), 3);")]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).wrapping_sub_signed(-4), 1);")]
         /// ```
-        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
-        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
+        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
+        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
@@ -2581,8 +2566,8 @@ macro_rules! uint_impl {
         #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_sub_signed(-2), (3, false));")]
         #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_sub_signed(-4), (1, true));")]
         /// ```
-        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
-        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "CURRENT_RUSTC_VERSION")]
+        #[stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
+        #[rustc_const_stable(feature = "mixed_integer_ops_unsigned_sub", since = "1.90.0")]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
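With the gates removed above, the `strict_*` family and `checked_signed_diff` become callable without feature flags once this stabilization lands; on older toolchains they still need `#![feature(strict_overflow_ops)]` and `#![feature(unsigned_signed_diff)]`. A short sketch of the semantics the doc examples describe:

```rust
fn main() {
    // Strict ops behave like the checked ops but panic on overflow
    // instead of returning None, in debug and release builds alike.
    assert_eq!(100u8.strict_add(27), 127);
    assert_eq!(1u8.strict_add_signed(-1), 0);
    assert_eq!(2u8.strict_pow(5), 32);

    // checked_signed_diff: the signed distance, or None if it does not fit.
    assert_eq!(10u8.checked_signed_diff(2), Some(8));
    assert_eq!(2u8.checked_signed_diff(10), Some(-8));
    assert_eq!(u8::MAX.checked_signed_diff(0), None);

    // This line would panic (overflow), even with overflow checks disabled:
    // let _ = u8::MAX.strict_add(1);
}
```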
diff --git a/library/core/src/num/wrapping.rs b/library/core/src/num/wrapping.rs
index c460f38bd2e..9ccad4b6459 100644
--- a/library/core/src/num/wrapping.rs
+++ b/library/core/src/num/wrapping.rs
@@ -88,7 +88,8 @@ impl<T: fmt::UpperHex> fmt::UpperHex for Wrapping<T> {
 macro_rules! sh_impl_signed {
     ($t:ident, $f:ident) => {
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Shl<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Shl<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -101,19 +102,24 @@ macro_rules! sh_impl_signed {
             }
         }
         forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f,
-        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl ShlAssign<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const ShlAssign<$f> for Wrapping<$t> {
             #[inline]
             fn shl_assign(&mut self, other: $f) {
                 *self = *self << other;
             }
         }
-        forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f }
+        forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Shr<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Shr<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -126,23 +132,28 @@ macro_rules! sh_impl_signed {
             }
         }
         forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f,
-        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl ShrAssign<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const ShrAssign<$f> for Wrapping<$t> {
             #[inline]
             fn shr_assign(&mut self, other: $f) {
                 *self = *self >> other;
             }
         }
-        forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f }
+        forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
 
 macro_rules! sh_impl_unsigned {
     ($t:ident, $f:ident) => {
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Shl<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Shl<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -151,19 +162,24 @@ macro_rules! sh_impl_unsigned {
             }
         }
         forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f,
-        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl ShlAssign<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const ShlAssign<$f> for Wrapping<$t> {
             #[inline]
             fn shl_assign(&mut self, other: $f) {
                 *self = *self << other;
             }
         }
-        forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f }
+        forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Shr<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Shr<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -172,16 +188,20 @@ macro_rules! sh_impl_unsigned {
             }
         }
         forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f,
-        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+        #[stable(feature = "wrapping_ref_ops", since = "1.39.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl ShrAssign<$f> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const ShrAssign<$f> for Wrapping<$t> {
             #[inline]
             fn shr_assign(&mut self, other: $f) {
                 *self = *self >> other;
             }
         }
-        forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f }
+        forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
 
@@ -210,7 +230,8 @@ sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
 macro_rules! wrapping_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Add for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Add for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -219,28 +240,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl Add, add for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl AddAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const AddAssign for Wrapping<$t> {
             #[inline]
             fn add_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self + other;
             }
         }
-        forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl AddAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const AddAssign<$t> for Wrapping<$t> {
             #[inline]
             fn add_assign(&mut self, other: $t) {
                 *self = *self + Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Sub for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Sub for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -249,28 +278,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl Sub, sub for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl SubAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const SubAssign for Wrapping<$t> {
             #[inline]
             fn sub_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self - other;
             }
         }
-        forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl SubAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const SubAssign<$t> for Wrapping<$t> {
             #[inline]
             fn sub_assign(&mut self, other: $t) {
                 *self = *self - Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Mul for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Mul for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -279,28 +316,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl Mul, mul for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl MulAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const MulAssign for Wrapping<$t> {
             #[inline]
             fn mul_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self * other;
             }
         }
-        forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl MulAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const MulAssign<$t> for Wrapping<$t> {
             #[inline]
             fn mul_assign(&mut self, other: $t) {
                 *self = *self * Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_div", since = "1.3.0")]
-        impl Div for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Div for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -309,28 +354,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl Div, div for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl DivAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const DivAssign for Wrapping<$t> {
             #[inline]
             fn div_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self / other;
             }
         }
-        forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl DivAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const DivAssign<$t> for Wrapping<$t> {
             #[inline]
             fn div_assign(&mut self, other: $t) {
                 *self = *self / Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_impls", since = "1.7.0")]
-        impl Rem for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Rem for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -339,28 +392,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl Rem, rem for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl RemAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const RemAssign for Wrapping<$t> {
             #[inline]
             fn rem_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self % other;
             }
         }
-        forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl RemAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const RemAssign<$t> for Wrapping<$t> {
             #[inline]
             fn rem_assign(&mut self, other: $t) {
                 *self = *self % Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Not for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Not for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -369,10 +430,12 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_unop! { impl Not, not for Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl BitXor for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXor for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -381,28 +444,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl BitXor, bitxor for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl BitXorAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXorAssign for Wrapping<$t> {
             #[inline]
             fn bitxor_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self ^ other;
             }
         }
-        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl BitXorAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXorAssign<$t> for Wrapping<$t> {
             #[inline]
             fn bitxor_assign(&mut self, other: $t) {
                 *self = *self ^ Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl BitOr for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOr for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -411,28 +482,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl BitOr, bitor for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl BitOrAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOrAssign for Wrapping<$t> {
             #[inline]
             fn bitor_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self | other;
             }
         }
-        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl BitOrAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOrAssign<$t> for Wrapping<$t> {
             #[inline]
             fn bitor_assign(&mut self, other: $t) {
                 *self = *self | Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl BitAnd for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAnd for Wrapping<$t> {
             type Output = Wrapping<$t>;
 
             #[inline]
@@ -441,28 +520,36 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_binop! { impl BitAnd, bitand for Wrapping<$t>, Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl BitAndAssign for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAndAssign for Wrapping<$t> {
             #[inline]
             fn bitand_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self & other;
             }
         }
-        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t> }
+        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t>,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_int_assign_impl", since = "1.60.0")]
-        impl BitAndAssign<$t> for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAndAssign<$t> for Wrapping<$t> {
             #[inline]
             fn bitand_assign(&mut self, other: $t) {
                 *self = *self & Wrapping(other);
             }
         }
-        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, $t }
+        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
         #[stable(feature = "wrapping_neg", since = "1.10.0")]
-        impl Neg for Wrapping<$t> {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Neg for Wrapping<$t> {
             type Output = Self;
             #[inline]
             fn neg(self) -> Self {
@@ -470,7 +557,8 @@ macro_rules! wrapping_impl {
             }
         }
         forward_ref_unop! { impl Neg, neg for Wrapping<$t>,
-                #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+        #[stable(feature = "wrapping_ref", since = "1.14.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
 
     )*)
 }
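The same pattern applies to `Wrapping<T>`: its operator impls, including the shifts, become `const` under the `const_ops` gate, while wrapping semantics are untouched. A sketch under the same nightly assumption:

```rust
// Assumption: nightly with `const_ops` (issue 143802) enabled.
#![feature(const_ops)]

use std::num::Wrapping;

// A multiply/xor mix step; every operation wraps instead of panicking,
// and with the const impls it can also run at compile time.
const fn mix(x: Wrapping<u32>) -> Wrapping<u32> {
    (x * Wrapping(0x9E37_79B9)) ^ (x >> 16)
}

const H: Wrapping<u32> = mix(Wrapping(12345));

fn main() {
    // Compile-time and run-time evaluation agree.
    assert_eq!(H, mix(Wrapping(12345)));
    // Wrapping behaviour is unchanged on stable.
    assert_eq!(Wrapping(u8::MAX) + Wrapping(1), Wrapping(0u8));
}
```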
diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs
index 7d44b1733b9..16c719b0c39 100644
--- a/library/core/src/ops/arith.rs
+++ b/library/core/src/ops/arith.rs
@@ -106,7 +106,9 @@ macro_rules! add_impl {
             fn add(self, other: $t) -> $t { self + other }
         }
 
-        forward_ref_binop! { impl Add, add for $t, $t }
+        forward_ref_binop! { impl Add, add for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -218,7 +220,9 @@ macro_rules! sub_impl {
             fn sub(self, other: $t) -> $t { self - other }
         }
 
-        forward_ref_binop! { impl Sub, sub for $t, $t }
+        forward_ref_binop! { impl Sub, sub for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -351,7 +355,9 @@ macro_rules! mul_impl {
             fn mul(self, other: $t) -> $t { self * other }
         }
 
-        forward_ref_binop! { impl Mul, mul for $t, $t }
+        forward_ref_binop! { impl Mul, mul for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -493,7 +499,9 @@ macro_rules! div_impl_integer {
             fn div(self, other: $t) -> $t { self / other }
         }
 
-        forward_ref_binop! { impl Div, div for $t, $t }
+        forward_ref_binop! { impl Div, div for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)*)
 }
 
@@ -513,7 +521,9 @@ macro_rules! div_impl_float {
             fn div(self, other: $t) -> $t { self / other }
         }
 
-        forward_ref_binop! { impl Div, div for $t, $t }
+        forward_ref_binop! { impl Div, div for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -599,7 +609,9 @@ macro_rules! rem_impl_integer {
             fn rem(self, other: $t) -> $t { self % other }
         }
 
-        forward_ref_binop! { impl Rem, rem for $t, $t }
+        forward_ref_binop! { impl Rem, rem for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)*)
 }
 
@@ -634,7 +646,9 @@ macro_rules! rem_impl_float {
             fn rem(self, other: $t) -> $t { self % other }
         }
 
-        forward_ref_binop! { impl Rem, rem for $t, $t }
+        forward_ref_binop! { impl Rem, rem for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -678,7 +692,9 @@ rem_impl_float! { f16 f32 f64 f128 }
 /// ```
 #[lang = "neg"]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[doc(alias = "-")]
+#[const_trait]
 pub trait Neg {
     /// The resulting type after applying the `-` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -701,7 +717,8 @@ pub trait Neg {
 macro_rules! neg_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Neg for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Neg for $t {
             type Output = $t;
 
             #[inline]
@@ -709,7 +726,9 @@ macro_rules! neg_impl {
             fn neg(self) -> $t { -self }
         }
 
-        forward_ref_unop! { impl Neg, neg for $t }
+        forward_ref_unop! { impl Neg, neg for $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -746,12 +765,14 @@ neg_impl! { isize i8 i16 i32 i64 i128 f16 f32 f64 f128 }
 /// ```
 #[lang = "add_assign"]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "cannot add-assign `{Rhs}` to `{Self}`",
     label = "no implementation for `{Self} += {Rhs}`"
 )]
 #[doc(alias = "+")]
 #[doc(alias = "+=")]
+#[const_trait]
 pub trait AddAssign<Rhs = Self> {
     /// Performs the `+=` operation.
     ///
@@ -769,14 +790,17 @@ pub trait AddAssign<Rhs = Self> {
 macro_rules! add_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl AddAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const AddAssign for $t {
             #[inline]
             #[track_caller]
             #[rustc_inherit_overflow_checks]
             fn add_assign(&mut self, other: $t) { *self += other }
         }
 
-        forward_ref_op_assign! { impl AddAssign, add_assign for $t, $t }
+        forward_ref_op_assign! { impl AddAssign, add_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -813,12 +837,14 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f16 f32 f
 /// ```
 #[lang = "sub_assign"]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "cannot subtract-assign `{Rhs}` from `{Self}`",
     label = "no implementation for `{Self} -= {Rhs}`"
 )]
 #[doc(alias = "-")]
 #[doc(alias = "-=")]
+#[const_trait]
 pub trait SubAssign<Rhs = Self> {
     /// Performs the `-=` operation.
     ///
@@ -836,14 +862,17 @@ pub trait SubAssign<Rhs = Self> {
 macro_rules! sub_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl SubAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const SubAssign for $t {
             #[inline]
             #[track_caller]
             #[rustc_inherit_overflow_checks]
             fn sub_assign(&mut self, other: $t) { *self -= other }
         }
 
-        forward_ref_op_assign! { impl SubAssign, sub_assign for $t, $t }
+        forward_ref_op_assign! { impl SubAssign, sub_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -871,12 +900,14 @@ sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f16 f32 f
 /// ```
 #[lang = "mul_assign"]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "cannot multiply-assign `{Self}` by `{Rhs}`",
     label = "no implementation for `{Self} *= {Rhs}`"
 )]
 #[doc(alias = "*")]
 #[doc(alias = "*=")]
+#[const_trait]
 pub trait MulAssign<Rhs = Self> {
     /// Performs the `*=` operation.
     ///
@@ -894,14 +925,17 @@ pub trait MulAssign<Rhs = Self> {
 macro_rules! mul_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl MulAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const MulAssign for $t {
             #[inline]
             #[track_caller]
             #[rustc_inherit_overflow_checks]
             fn mul_assign(&mut self, other: $t) { *self *= other }
         }
 
-        forward_ref_op_assign! { impl MulAssign, mul_assign for $t, $t }
+        forward_ref_op_assign! { impl MulAssign, mul_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -929,12 +963,14 @@ mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f16 f32 f
 /// ```
 #[lang = "div_assign"]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "cannot divide-assign `{Self}` by `{Rhs}`",
     label = "no implementation for `{Self} /= {Rhs}`"
 )]
 #[doc(alias = "/")]
 #[doc(alias = "/=")]
+#[const_trait]
 pub trait DivAssign<Rhs = Self> {
     /// Performs the `/=` operation.
     ///
@@ -952,13 +988,16 @@ pub trait DivAssign<Rhs = Self> {
 macro_rules! div_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl DivAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const DivAssign for $t {
             #[inline]
             #[track_caller]
             fn div_assign(&mut self, other: $t) { *self /= other }
         }
 
-        forward_ref_op_assign! { impl DivAssign, div_assign for $t, $t }
+        forward_ref_op_assign! { impl DivAssign, div_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -990,12 +1029,14 @@ div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f16 f32 f
 /// ```
 #[lang = "rem_assign"]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "cannot calculate and assign the remainder of `{Self}` divided by `{Rhs}`",
     label = "no implementation for `{Self} %= {Rhs}`"
 )]
 #[doc(alias = "%")]
 #[doc(alias = "%=")]
+#[const_trait]
 pub trait RemAssign<Rhs = Self> {
     /// Performs the `%=` operation.
     ///
@@ -1013,13 +1054,16 @@ pub trait RemAssign<Rhs = Self> {
 macro_rules! rem_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl RemAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const RemAssign for $t {
             #[inline]
             #[track_caller]
             fn rem_assign(&mut self, other: $t) { *self %= other }
         }
 
-        forward_ref_op_assign! { impl RemAssign, rem_assign for $t, $t }
+        forward_ref_op_assign! { impl RemAssign, rem_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
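Here the arithmetic traits and their assign variants get `#[const_trait]` plus `impl const` for the primitive types, and the `forward_ref_*` expansions carry the same `const_ops` gate. For user code the visible effect is that operators applied through references, which go through these trait impls rather than built-in operations, also work in const contexts. A hedged nightly-only sketch:

```rust
// Assumption: nightly with `const_ops` (issue 143802). Plain value arithmetic
// on primitives in const fn already worked; the new part is the reference
// and assign forms that resolve through trait impls.
#![feature(const_ops)]

const fn sum_refs(a: &i32, b: &i32) -> i32 {
    // Resolves to the forwarded `impl Add<&i32> for &i32`, now `const`.
    a + b
}

const fn accumulate() -> i32 {
    let mut t = sum_refs(&40, &1);
    // `+= &1` goes through the forwarded `AddAssign<&i32> for i32` impl.
    t += &1;
    t
}

const TOTAL: i32 = accumulate();

fn main() {
    assert_eq!(TOTAL, 42);
}
```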
 
diff --git a/library/core/src/ops/bit.rs b/library/core/src/ops/bit.rs
index deb54c8ba34..00196728219 100644
--- a/library/core/src/ops/bit.rs
+++ b/library/core/src/ops/bit.rs
@@ -30,7 +30,9 @@
 /// ```
 #[lang = "not"]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[doc(alias = "!")]
+#[const_trait]
 pub trait Not {
     /// The resulting type after applying the `!` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -54,21 +56,25 @@ pub trait Not {
 macro_rules! not_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Not for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Not for $t {
             type Output = $t;
 
             #[inline]
             fn not(self) -> $t { !self }
         }
 
-        forward_ref_unop! { impl Not, not for $t }
+        forward_ref_unop! { impl Not, not for $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
 not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 
 #[stable(feature = "not_never", since = "1.60.0")]
-impl Not for ! {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Not for ! {
     type Output = !;
 
     #[inline]
@@ -137,10 +143,12 @@ impl Not for ! {
 #[lang = "bitand"]
 #[doc(alias = "&")]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} & {Rhs}`",
     label = "no implementation for `{Self} & {Rhs}`"
 )]
+#[const_trait]
 pub trait BitAnd<Rhs = Self> {
     /// The resulting type after applying the `&` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -164,14 +172,17 @@ pub trait BitAnd<Rhs = Self> {
 macro_rules! bitand_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl BitAnd for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAnd for $t {
             type Output = $t;
 
             #[inline]
             fn bitand(self, rhs: $t) -> $t { self & rhs }
         }
 
-        forward_ref_binop! { impl BitAnd, bitand for $t, $t }
+        forward_ref_binop! { impl BitAnd, bitand for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -237,10 +248,12 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 #[lang = "bitor"]
 #[doc(alias = "|")]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} | {Rhs}`",
     label = "no implementation for `{Self} | {Rhs}`"
 )]
+#[const_trait]
 pub trait BitOr<Rhs = Self> {
     /// The resulting type after applying the `|` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -264,14 +277,17 @@ pub trait BitOr<Rhs = Self> {
 macro_rules! bitor_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl BitOr for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOr for $t {
             type Output = $t;
 
             #[inline]
             fn bitor(self, rhs: $t) -> $t { self | rhs }
         }
 
-        forward_ref_binop! { impl BitOr, bitor for $t, $t }
+        forward_ref_binop! { impl BitOr, bitor for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -337,10 +353,12 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 #[lang = "bitxor"]
 #[doc(alias = "^")]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} ^ {Rhs}`",
     label = "no implementation for `{Self} ^ {Rhs}`"
 )]
+#[const_trait]
 pub trait BitXor<Rhs = Self> {
     /// The resulting type after applying the `^` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -364,14 +382,17 @@ pub trait BitXor<Rhs = Self> {
 macro_rules! bitxor_impl {
     ($($t:ty)*) => ($(
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl BitXor for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXor for $t {
             type Output = $t;
 
             #[inline]
             fn bitxor(self, other: $t) -> $t { self ^ other }
         }
 
-        forward_ref_binop! { impl BitXor, bitxor for $t, $t }
+        forward_ref_binop! { impl BitXor, bitxor for $t, $t,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )*)
 }
 
@@ -436,10 +457,12 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 #[lang = "shl"]
 #[doc(alias = "<<")]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} << {Rhs}`",
     label = "no implementation for `{Self} << {Rhs}`"
 )]
+#[const_trait]
 pub trait Shl<Rhs = Self> {
     /// The resulting type after applying the `<<` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -461,7 +484,8 @@ pub trait Shl<Rhs = Self> {
 macro_rules! shl_impl {
     ($t:ty, $f:ty) => {
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Shl<$f> for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Shl<$f> for $t {
             type Output = $t;
 
             #[inline]
@@ -471,7 +495,9 @@ macro_rules! shl_impl {
             }
         }
 
-        forward_ref_binop! { impl Shl, shl for $t, $f }
+        forward_ref_binop! { impl Shl, shl for $t, $f,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
 
@@ -554,10 +580,12 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
 #[lang = "shr"]
 #[doc(alias = ">>")]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} >> {Rhs}`",
     label = "no implementation for `{Self} >> {Rhs}`"
 )]
+#[const_trait]
 pub trait Shr<Rhs = Self> {
     /// The resulting type after applying the `>>` operator.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -579,7 +607,8 @@ pub trait Shr<Rhs = Self> {
 macro_rules! shr_impl {
     ($t:ty, $f:ty) => {
         #[stable(feature = "rust1", since = "1.0.0")]
-        impl Shr<$f> for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const Shr<$f> for $t {
             type Output = $t;
 
             #[inline]
@@ -589,7 +618,9 @@ macro_rules! shr_impl {
             }
         }
 
-        forward_ref_binop! { impl Shr, shr for $t, $f }
+        forward_ref_binop! { impl Shr, shr for $t, $f,
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
 
@@ -681,10 +712,12 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
 #[lang = "bitand_assign"]
 #[doc(alias = "&=")]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} &= {Rhs}`",
     label = "no implementation for `{Self} &= {Rhs}`"
 )]
+#[const_trait]
 pub trait BitAndAssign<Rhs = Self> {
     /// Performs the `&=` operation.
     ///
@@ -714,12 +747,15 @@ pub trait BitAndAssign<Rhs = Self> {
 macro_rules! bitand_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl BitAndAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitAndAssign for $t {
             #[inline]
             fn bitand_assign(&mut self, other: $t) { *self &= other }
         }
 
-        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for $t, $t }
+        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -752,10 +788,12 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 #[lang = "bitor_assign"]
 #[doc(alias = "|=")]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} |= {Rhs}`",
     label = "no implementation for `{Self} |= {Rhs}`"
 )]
+#[const_trait]
 pub trait BitOrAssign<Rhs = Self> {
     /// Performs the `|=` operation.
     ///
@@ -785,12 +823,15 @@ pub trait BitOrAssign<Rhs = Self> {
 macro_rules! bitor_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl BitOrAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitOrAssign for $t {
             #[inline]
             fn bitor_assign(&mut self, other: $t) { *self |= other }
         }
 
-        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for $t, $t }
+        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -823,10 +864,12 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 #[lang = "bitxor_assign"]
 #[doc(alias = "^=")]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} ^= {Rhs}`",
     label = "no implementation for `{Self} ^= {Rhs}`"
 )]
+#[const_trait]
 pub trait BitXorAssign<Rhs = Self> {
     /// Performs the `^=` operation.
     ///
@@ -856,12 +899,15 @@ pub trait BitXorAssign<Rhs = Self> {
 macro_rules! bitxor_assign_impl {
     ($($t:ty)+) => ($(
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl BitXorAssign for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const BitXorAssign for $t {
             #[inline]
             fn bitxor_assign(&mut self, other: $t) { *self ^= other }
         }
 
-        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for $t, $t }
+        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for $t, $t,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     )+)
 }
 
@@ -892,10 +938,12 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
 #[lang = "shl_assign"]
 #[doc(alias = "<<=")]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} <<= {Rhs}`",
     label = "no implementation for `{Self} <<= {Rhs}`"
 )]
+#[const_trait]
 pub trait ShlAssign<Rhs = Self> {
     /// Performs the `<<=` operation.
     ///
@@ -917,7 +965,8 @@ pub trait ShlAssign<Rhs = Self> {
 macro_rules! shl_assign_impl {
     ($t:ty, $f:ty) => {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl ShlAssign<$f> for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const ShlAssign<$f> for $t {
             #[inline]
             #[rustc_inherit_overflow_checks]
             fn shl_assign(&mut self, other: $f) {
@@ -925,7 +974,9 @@ macro_rules! shl_assign_impl {
             }
         }
 
-        forward_ref_op_assign! { impl ShlAssign, shl_assign for $t, $f }
+        forward_ref_op_assign! { impl ShlAssign, shl_assign for $t, $f,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
 
@@ -974,10 +1025,12 @@ shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
 #[lang = "shr_assign"]
 #[doc(alias = ">>=")]
 #[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
 #[diagnostic::on_unimplemented(
     message = "no implementation for `{Self} >>= {Rhs}`",
     label = "no implementation for `{Self} >>= {Rhs}`"
 )]
+#[const_trait]
 pub trait ShrAssign<Rhs = Self> {
     /// Performs the `>>=` operation.
     ///
@@ -999,7 +1052,8 @@ pub trait ShrAssign<Rhs = Self> {
 macro_rules! shr_assign_impl {
     ($t:ty, $f:ty) => {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
-        impl ShrAssign<$f> for $t {
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+        impl const ShrAssign<$f> for $t {
             #[inline]
             #[rustc_inherit_overflow_checks]
             fn shr_assign(&mut self, other: $f) {
@@ -1007,7 +1061,9 @@ macro_rules! shr_assign_impl {
             }
         }
 
-        forward_ref_op_assign! { impl ShrAssign, shr_assign for $t, $f }
+        forward_ref_op_assign! { impl ShrAssign, shr_assign for $t, $f,
+        #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]
+        #[rustc_const_unstable(feature = "const_ops", issue = "143802")] }
     };
 }
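// --- Illustrative sketch (editorial note, not part of the patch) -----------
// The same pattern applies to the bitwise traits made `const` above: generic
// bit-manipulation helpers can now be evaluated at compile time for the
// primitive types. Nightly-only; feature gates are an assumption.
#![feature(const_trait_impl, const_ops)]
use core::ops::{BitAnd, Not};

const fn clear_bits<T>(value: T, mask: T) -> T
where
    T: [const] BitAnd<Output = T> + [const] Not<Output = T> + Copy,
{
    value & !mask // dispatches through `const BitAnd` and `const Not`
}

const FLAGS: u8 = clear_bits(0b1111_0110, 0b0000_0110); // 0b1111_0000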
 
diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs
index 26661b20c12..7489a8bb6e7 100644
--- a/library/core/src/ops/control_flow.rs
+++ b/library/core/src/ops/control_flow.rs
@@ -187,6 +187,80 @@ impl<B, C> ControlFlow<B, C> {
         }
     }
 
+    /// Converts the `ControlFlow` into a `Result` which is `Ok` if the
+    /// `ControlFlow` was `Break` and `Err` otherwise.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(control_flow_ok)]
+    ///
+    /// use std::ops::ControlFlow;
+    ///
+    /// struct TreeNode<T> {
+    ///     value: T,
+    ///     left: Option<Box<TreeNode<T>>>,
+    ///     right: Option<Box<TreeNode<T>>>,
+    /// }
+    ///
+    /// impl<T> TreeNode<T> {
+    ///     fn find<'a>(&'a self, mut predicate: impl FnMut(&T) -> bool) -> Result<&'a T, ()> {
+    ///         let mut f = |t: &'a T| -> ControlFlow<&'a T> {
+    ///             if predicate(t) {
+    ///                 ControlFlow::Break(t)
+    ///             } else {
+    ///                 ControlFlow::Continue(())
+    ///             }
+    ///         };
+    ///
+    ///         self.traverse_inorder(&mut f).break_ok()
+    ///     }
+    ///
+    ///     fn traverse_inorder<'a, B>(
+    ///         &'a self,
+    ///         f: &mut impl FnMut(&'a T) -> ControlFlow<B>,
+    ///     ) -> ControlFlow<B> {
+    ///         if let Some(left) = &self.left {
+    ///             left.traverse_inorder(f)?;
+    ///         }
+    ///         f(&self.value)?;
+    ///         if let Some(right) = &self.right {
+    ///             right.traverse_inorder(f)?;
+    ///         }
+    ///         ControlFlow::Continue(())
+    ///     }
+    ///
+    ///     fn leaf(value: T) -> Option<Box<TreeNode<T>>> {
+    ///         Some(Box::new(Self {
+    ///             value,
+    ///             left: None,
+    ///             right: None,
+    ///         }))
+    ///     }
+    /// }
+    ///
+    /// let node = TreeNode {
+    ///     value: 0,
+    ///     left: TreeNode::leaf(1),
+    ///     right: Some(Box::new(TreeNode {
+    ///         value: -1,
+    ///         left: TreeNode::leaf(5),
+    ///         right: TreeNode::leaf(2),
+    ///     })),
+    /// };
+    ///
+    /// let res = node.find(|val: &i32| *val > 3);
+    /// assert_eq!(res, Ok(&5));
+    /// ```
+    #[inline]
+    #[unstable(feature = "control_flow_ok", issue = "140266")]
+    pub fn break_ok(self) -> Result<B, C> {
+        match self {
+            ControlFlow::Continue(c) => Err(c),
+            ControlFlow::Break(b) => Ok(b),
+        }
+    }
+
     /// Maps `ControlFlow<B, C>` to `ControlFlow<T, C>` by applying a function
     /// to the break value in case it exists.
     #[inline]
@@ -218,6 +292,79 @@ impl<B, C> ControlFlow<B, C> {
         }
     }
 
+    /// Converts the `ControlFlow` into a `Result` which is `Ok` if the
+    /// `ControlFlow` was `Continue` and `Err` otherwise.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(control_flow_ok)]
+    ///
+    /// use std::ops::ControlFlow;
+    ///
+    /// struct TreeNode<T> {
+    ///     value: T,
+    ///     left: Option<Box<TreeNode<T>>>,
+    ///     right: Option<Box<TreeNode<T>>>,
+    /// }
+    ///
+    /// impl<T> TreeNode<T> {
+    ///     fn validate<B>(&self, f: &mut impl FnMut(&T) -> ControlFlow<B>) -> Result<(), B> {
+    ///         self.traverse_inorder(f).continue_ok()
+    ///     }
+    ///
+    ///     fn traverse_inorder<B>(&self, f: &mut impl FnMut(&T) -> ControlFlow<B>) -> ControlFlow<B> {
+    ///         if let Some(left) = &self.left {
+    ///             left.traverse_inorder(f)?;
+    ///         }
+    ///         f(&self.value)?;
+    ///         if let Some(right) = &self.right {
+    ///             right.traverse_inorder(f)?;
+    ///         }
+    ///         ControlFlow::Continue(())
+    ///     }
+    ///
+    ///     fn leaf(value: T) -> Option<Box<TreeNode<T>>> {
+    ///         Some(Box::new(Self {
+    ///             value,
+    ///             left: None,
+    ///             right: None,
+    ///         }))
+    ///     }
+    /// }
+    ///
+    /// let node = TreeNode {
+    ///     value: 0,
+    ///     left: TreeNode::leaf(1),
+    ///     right: Some(Box::new(TreeNode {
+    ///         value: -1,
+    ///         left: TreeNode::leaf(5),
+    ///         right: TreeNode::leaf(2),
+    ///     })),
+    /// };
+    ///
+    /// let res = node.validate(&mut |val| {
+    ///     if *val < 0 {
+    ///         return ControlFlow::Break("negative value detected");
+    ///     }
+    ///
+    ///     if *val > 4 {
+    ///         return ControlFlow::Break("too big value detected");
+    ///     }
+    ///
+    ///     ControlFlow::Continue(())
+    /// });
+    /// assert_eq!(res, Err("too big value detected"));
+    /// ```
+    #[inline]
+    #[unstable(feature = "control_flow_ok", issue = "140266")]
+    pub fn continue_ok(self) -> Result<C, B> {
+        match self {
+            ControlFlow::Continue(c) => Ok(c),
+            ControlFlow::Break(b) => Err(b),
+        }
+    }
+
     /// Maps `ControlFlow<B, C>` to `ControlFlow<B, T>` by applying a function
     /// to the continue value in case it exists.
     #[inline]
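// --- Illustrative sketch (editorial note, not part of the patch) -----------
// `break_ok` / `continue_ok` are the two orientations of the same
// `ControlFlow` -> `Result` conversion, which lets short-circuiting search
// code hand a plain `Result` back to callers. Nightly-only (`control_flow_ok`).
#![feature(control_flow_ok)]
use std::ops::ControlFlow;

fn first_over(limit: i32, xs: &[i32]) -> Result<i32, ()> {
    xs.iter()
        .try_fold((), |(), &x| {
            if x > limit { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
        })
        .break_ok() // Break(x) -> Ok(x), Continue(()) -> Err(())
}

fn main() {
    assert_eq!(first_over(3, &[1, 5, 2]), Ok(5));
    assert_eq!(first_over(9, &[1, 5, 2]), Err(()));
}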
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
index c2dede9fa08..5f68c1f55c2 100644
--- a/library/core/src/ops/deref.rs
+++ b/library/core/src/ops/deref.rs
@@ -269,7 +269,7 @@ impl<T: ?Sized> const Deref for &mut T {
 #[stable(feature = "rust1", since = "1.0.0")]
 #[const_trait]
 #[rustc_const_unstable(feature = "const_deref", issue = "88955")]
-pub trait DerefMut: ~const Deref + PointeeSized {
+pub trait DerefMut: [const] Deref + PointeeSized {
     /// Mutably dereferences the value.
     #[stable(feature = "rust1", since = "1.0.0")]
     #[rustc_diagnostic_item = "deref_mut_method"]
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index efc751a094d..ad46e52a475 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -260,7 +260,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
     impl<A: Tuple, F: ?Sized> const Fn<A> for &F
     where
-        F: ~const Fn<A>,
+        F: [const] Fn<A>,
     {
         extern "rust-call" fn call(&self, args: A) -> F::Output {
             (**self).call(args)
@@ -271,7 +271,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
     impl<A: Tuple, F: ?Sized> const FnMut<A> for &F
     where
-        F: ~const Fn<A>,
+        F: [const] Fn<A>,
     {
         extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
             (**self).call(args)
@@ -282,7 +282,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
     impl<A: Tuple, F: ?Sized> const FnOnce<A> for &F
     where
-        F: ~const Fn<A>,
+        F: [const] Fn<A>,
     {
         type Output = F::Output;
 
@@ -295,7 +295,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
     impl<A: Tuple, F: ?Sized> const FnMut<A> for &mut F
     where
-        F: ~const FnMut<A>,
+        F: [const] FnMut<A>,
     {
         extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
             (*self).call_mut(args)
@@ -306,7 +306,7 @@ mod impls {
     #[rustc_const_unstable(feature = "const_trait_impl", issue = "143874")]
     impl<A: Tuple, F: ?Sized> const FnOnce<A> for &mut F
     where
-        F: ~const FnMut<A>,
+        F: [const] FnMut<A>,
     {
         type Output = F::Output;
         extern "rust-call" fn call_once(self, args: A) -> F::Output {
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index d8489e9a949..1aed2fb4742 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -169,7 +169,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
 #[doc(alias = "[]")]
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 #[const_trait]
-pub trait IndexMut<Idx: ?Sized>: ~const Index<Idx> {
+pub trait IndexMut<Idx: ?Sized>: [const] Index<Idx> {
     /// Performs the mutable indexing (`container[index]`) operation.
     ///
     /// # Panics
diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs
index f33a33e6b75..95d1e2069ac 100644
--- a/library/core/src/ops/range.rs
+++ b/library/core/src/ops/range.rs
@@ -853,7 +853,7 @@ pub trait RangeBounds<T: ?Sized> {
     /// assert!( RangeBounds::is_empty(&(f32::NAN..5.0)));
     /// ```
     ///
-    /// But never empty is either side is unbounded:
+    /// But never empty if either side is unbounded:
     ///
     /// ```
     /// #![feature(range_bounds_is_empty)]
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
index a889c824be5..76bf438878f 100644
--- a/library/core/src/ops/try_trait.rs
+++ b/library/core/src/ops/try_trait.rs
@@ -130,7 +130,7 @@ use crate::ops::ControlFlow;
 #[lang = "Try"]
 #[const_trait]
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
-pub trait Try: ~const FromResidual {
+pub trait Try: [const] FromResidual {
     /// The type of the value produced by `?` when *not* short-circuiting.
     #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")]
     type Output;
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index ed070fbd227..560d20ce617 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -651,7 +651,7 @@ impl<T> Option<T> {
     #[inline]
     #[stable(feature = "is_some_and", since = "1.70.0")]
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
-    pub const fn is_some_and(self, f: impl ~const FnOnce(T) -> bool + ~const Destruct) -> bool {
+    pub const fn is_some_and(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
         match self {
             None => false,
             Some(x) => f(x),
@@ -700,7 +700,7 @@ impl<T> Option<T> {
     #[inline]
     #[stable(feature = "is_none_or", since = "1.82.0")]
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
-    pub const fn is_none_or(self, f: impl ~const FnOnce(T) -> bool + ~const Destruct) -> bool {
+    pub const fn is_none_or(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
         match self {
             None => true,
             Some(x) => f(x),
@@ -1030,7 +1030,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn unwrap_or(self, default: T) -> T
     where
-        T: ~const Destruct,
+        T: [const] Destruct,
     {
         match self {
             Some(x) => x,
@@ -1053,7 +1053,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn unwrap_or_else<F>(self, f: F) -> T
     where
-        F: ~const FnOnce() -> T + ~const Destruct,
+        F: [const] FnOnce() -> T + [const] Destruct,
     {
         match self {
             Some(x) => x,
@@ -1085,7 +1085,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn unwrap_or_default(self) -> T
     where
-        T: ~const Default,
+        T: [const] Default,
     {
         match self {
             Some(x) => x,
@@ -1152,7 +1152,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn map<U, F>(self, f: F) -> Option<U>
     where
-        F: ~const FnOnce(T) -> U + ~const Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
     {
         match self {
             Some(x) => Some(f(x)),
@@ -1183,7 +1183,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn inspect<F>(self, f: F) -> Self
     where
-        F: ~const FnOnce(&T) + ~const Destruct,
+        F: [const] FnOnce(&T) + [const] Destruct,
     {
         if let Some(ref x) = self {
             f(x);
@@ -1216,8 +1216,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn map_or<U, F>(self, default: U, f: F) -> U
     where
-        F: ~const FnOnce(T) -> U + ~const Destruct,
-        U: ~const Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
+        U: [const] Destruct,
     {
         match self {
             Some(t) => f(t),
@@ -1263,8 +1263,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
     where
-        D: ~const FnOnce() -> U + ~const Destruct,
-        F: ~const FnOnce(T) -> U + ~const Destruct,
+        D: [const] FnOnce() -> U + [const] Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
     {
         match self {
             Some(t) => f(t),
@@ -1294,8 +1294,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn map_or_default<U, F>(self, f: F) -> U
     where
-        U: ~const Default,
-        F: ~const FnOnce(T) -> U + ~const Destruct,
+        U: [const] Default,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
     {
         match self {
             Some(t) => f(t),
@@ -1327,7 +1327,7 @@ impl<T> Option<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
-    pub const fn ok_or<E: ~const Destruct>(self, err: E) -> Result<T, E> {
+    pub const fn ok_or<E: [const] Destruct>(self, err: E) -> Result<T, E> {
         match self {
             Some(v) => Ok(v),
             None => Err(err),
@@ -1355,7 +1355,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
     where
-        F: ~const FnOnce() -> E + ~const Destruct,
+        F: [const] FnOnce() -> E + [const] Destruct,
     {
         match self {
             Some(v) => Ok(v),
@@ -1487,8 +1487,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn and<U>(self, optb: Option<U>) -> Option<U>
     where
-        T: ~const Destruct,
-        U: ~const Destruct,
+        T: [const] Destruct,
+        U: [const] Destruct,
     {
         match self {
             Some(_) => optb,
@@ -1531,7 +1531,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn and_then<U, F>(self, f: F) -> Option<U>
     where
-        F: ~const FnOnce(T) -> Option<U> + ~const Destruct,
+        F: [const] FnOnce(T) -> Option<U> + [const] Destruct,
     {
         match self {
             Some(x) => f(x),
@@ -1568,8 +1568,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn filter<P>(self, predicate: P) -> Self
     where
-        P: ~const FnOnce(&T) -> bool + ~const Destruct,
-        T: ~const Destruct,
+        P: [const] FnOnce(&T) -> bool + [const] Destruct,
+        T: [const] Destruct,
     {
         if let Some(x) = self {
             if predicate(&x) {
@@ -1611,7 +1611,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn or(self, optb: Option<T>) -> Option<T>
     where
-        T: ~const Destruct,
+        T: [const] Destruct,
     {
         match self {
             x @ Some(_) => x,
@@ -1637,10 +1637,10 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn or_else<F>(self, f: F) -> Option<T>
     where
-        F: ~const FnOnce() -> Option<T> + ~const Destruct,
+        F: [const] FnOnce() -> Option<T> + [const] Destruct,
         //FIXME(const_hack): this `T: ~const Destruct` is unnecessary, but even precise live drops can't tell
         // no value of type `T` gets dropped here
-        T: ~const Destruct,
+        T: [const] Destruct,
     {
         match self {
             x @ Some(_) => x,
@@ -1674,7 +1674,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn xor(self, optb: Option<T>) -> Option<T>
     where
-        T: ~const Destruct,
+        T: [const] Destruct,
     {
         match (self, optb) {
             (a @ Some(_), None) => a,
@@ -1712,7 +1712,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn insert(&mut self, value: T) -> &mut T
     where
-        T: ~const Destruct,
+        T: [const] Destruct,
     {
         *self = Some(value);
 
@@ -1768,7 +1768,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn get_or_insert_default(&mut self) -> &mut T
     where
-        T: ~const Default + ~const Destruct,
+        T: [const] Default + [const] Destruct,
     {
         self.get_or_insert_with(T::default)
     }
@@ -1795,8 +1795,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn get_or_insert_with<F>(&mut self, f: F) -> &mut T
     where
-        F: ~const FnOnce() -> T + ~const Destruct,
-        T: ~const Destruct,
+        F: [const] FnOnce() -> T + [const] Destruct,
+        T: [const] Destruct,
     {
         if let None = self {
             *self = Some(f());
@@ -1863,7 +1863,7 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn take_if<P>(&mut self, predicate: P) -> Option<T>
     where
-        P: ~const FnOnce(&mut T) -> bool + ~const Destruct,
+        P: [const] FnOnce(&mut T) -> bool + [const] Destruct,
     {
         if self.as_mut().map_or(false, predicate) { self.take() } else { None }
     }
@@ -1911,8 +1911,8 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn zip<U>(self, other: Option<U>) -> Option<(T, U)>
     where
-        T: ~const Destruct,
-        U: ~const Destruct,
+        T: [const] Destruct,
+        U: [const] Destruct,
     {
         match (self, other) {
             (Some(a), Some(b)) => Some((a, b)),
@@ -1952,9 +1952,9 @@ impl<T> Option<T> {
     #[rustc_const_unstable(feature = "const_option_ops", issue = "143956")]
     pub const fn zip_with<U, F, R>(self, other: Option<U>, f: F) -> Option<R>
     where
-        F: ~const FnOnce(T, U) -> R + ~const Destruct,
-        T: ~const Destruct,
-        U: ~const Destruct,
+        F: [const] FnOnce(T, U) -> R + [const] Destruct,
+        T: [const] Destruct,
+        U: [const] Destruct,
     {
         match (self, other) {
             (Some(a), Some(b)) => Some(f(a, b)),
@@ -2149,7 +2149,7 @@ impl<T> const Clone for Option<T>
 where
     // FIXME(const_hack): the T: ~const Destruct should be inferred from the Self: ~const Destruct in clone_from.
     // See https://github.com/rust-lang/rust/issues/144207
-    T: ~const Clone + ~const Destruct,
+    T: [const] Clone + [const] Destruct,
 {
     #[inline]
     fn clone(&self) -> Self {
@@ -2307,7 +2307,7 @@ impl<'a, T> const From<&'a mut Option<T>> for Option<&'a mut T> {
 impl<T> crate::marker::StructuralPartialEq for Option<T> {}
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
-impl<T: ~const PartialEq> const PartialEq for Option<T> {
+impl<T: [const] PartialEq> const PartialEq for Option<T> {
     #[inline]
     fn eq(&self, other: &Self) -> bool {
         // Spelling out the cases explicitly optimizes better than
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 2ad520b7ead..6546dde39ac 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1430,6 +1430,28 @@ impl<T: PointeeSized> *const T {
     }
 }
 
+impl<T> *const T {
+    /// Casts from a type to its maybe-uninitialized version.
+    #[must_use]
+    #[inline(always)]
+    #[unstable(feature = "cast_maybe_uninit", issue = "145036")]
+    pub const fn cast_uninit(self) -> *const MaybeUninit<T> {
+        self as _
+    }
+}
+impl<T> *const MaybeUninit<T> {
+    /// Casts from a maybe-uninitialized type to its initialized version.
+    ///
+    /// This is always safe, since UB can only occur if the pointer is read
+    /// before being initialized.
+    #[must_use]
+    #[inline(always)]
+    #[unstable(feature = "cast_maybe_uninit", issue = "145036")]
+    pub const fn cast_init(self) -> *const T {
+        self as _
+    }
+}
+
 impl<T> *const [T] {
     /// Returns the length of a raw slice.
     ///
@@ -1528,7 +1550,7 @@ impl<T> *const [T] {
     #[inline]
     pub const unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
     where
-        I: ~const SliceIndex<[T]>,
+        I: [const] SliceIndex<[T]>,
     {
         // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
         unsafe { index.get_unchecked(self) }
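// --- Illustrative sketch (editorial note, not part of the patch) -----------
// `cast_uninit` / `cast_init` are plain casts that keep the address and
// provenance; they only change whether the pointee is typed as
// `MaybeUninit<T>`. Nightly-only (`cast_maybe_uninit`).
#![feature(cast_maybe_uninit)]
use core::mem::MaybeUninit;

fn roundtrip(p: *const u32) {
    let u: *const MaybeUninit<u32> = p.cast_uninit();
    let q: *const u32 = u.cast_init();
    // Same address and provenance; reading through `q` is still only sound
    // once the pointee has actually been initialized.
    assert_eq!(p, q);
}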
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 1a2a5182567..b2607e45324 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -885,10 +885,10 @@ pub const fn without_provenance<T>(addr: usize) -> *const T {
 /// This is useful for initializing types which lazily allocate, like
 /// `Vec::new` does.
 ///
-/// Note that the pointer value may potentially represent a valid pointer to
-/// a `T`, which means this must not be used as a "not yet initialized"
-/// sentinel value. Types that lazily allocate must track initialization by
-/// some other means.
+/// Note that the address of the returned pointer may potentially
+/// be that of a valid pointer, which means this must not be used
+/// as a "not yet initialized" sentinel value.
+/// Types that lazily allocate must track initialization by some other means.
 #[inline(always)]
 #[must_use]
 #[stable(feature = "strict_provenance", since = "1.84.0")]
@@ -928,10 +928,10 @@ pub const fn without_provenance_mut<T>(addr: usize) -> *mut T {
 /// This is useful for initializing types which lazily allocate, like
 /// `Vec::new` does.
 ///
-/// Note that the pointer value may potentially represent a valid pointer to
-/// a `T`, which means this must not be used as a "not yet initialized"
-/// sentinel value. Types that lazily allocate must track initialization by
-/// some other means.
+/// Note that the address of the returned pointer may potentially
+/// be that of a valid pointer, which means this must not be used
+/// as a "not yet initialized" sentinel value.
+/// Types that lazily allocate must track initialization by some other means.
 #[inline(always)]
 #[must_use]
 #[stable(feature = "strict_provenance", since = "1.84.0")]
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 579e2461103..4add964141a 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1687,6 +1687,31 @@ impl<T: PointeeSized> *mut T {
     }
 }
 
+impl<T> *mut T {
+    /// Casts from a type to its maybe-uninitialized version.
+    ///
+    /// This is always safe, since UB can only occur if the pointer is read
+    /// before being initialized.
+    #[must_use]
+    #[inline(always)]
+    #[unstable(feature = "cast_maybe_uninit", issue = "145036")]
+    pub const fn cast_uninit(self) -> *mut MaybeUninit<T> {
+        self as _
+    }
+}
+impl<T> *mut MaybeUninit<T> {
+    /// Casts from a maybe-uninitialized type to its initialized version.
+    ///
+    /// This is always safe, since UB can only occur if the pointer is read
+    /// before being initialized.
+    #[must_use]
+    #[inline(always)]
+    #[unstable(feature = "cast_maybe_uninit", issue = "145036")]
+    pub const fn cast_init(self) -> *mut T {
+        self as _
+    }
+}
+
 impl<T> *mut [T] {
     /// Returns the length of a raw slice.
     ///
@@ -1885,7 +1910,7 @@ impl<T> *mut [T] {
     #[inline(always)]
     pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
     where
-        I: ~const SliceIndex<[T]>,
+        I: [const] SliceIndex<[T]>,
     {
         // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
         unsafe { index.get_unchecked_mut(self) }
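// --- Illustrative sketch (editorial note, not part of the patch) -----------
// The `*mut` variants support the usual "write through `MaybeUninit`, then
// hand out the initialized pointer" pattern. Nightly-only (`cast_maybe_uninit`).
#![feature(cast_maybe_uninit)]
use core::mem::MaybeUninit;

/// # Safety
/// `slot` must be valid for writes of a `u64`.
unsafe fn init_slot(slot: *mut MaybeUninit<u64>, value: u64) -> *mut u64 {
    // Writing a `MaybeUninit` is always fine; only a premature read would be UB.
    unsafe { slot.write(MaybeUninit::new(value)) };
    slot.cast_init() // now points at an initialized u64
}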
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index 62da6567cca..da382b8715e 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -109,10 +109,10 @@ impl<T: Sized> NonNull<T> {
     /// This is useful for initializing types which lazily allocate, like
     /// `Vec::new` does.
     ///
-    /// Note that the pointer value may potentially represent a valid pointer to
-    /// a `T`, which means this must not be used as a "not yet initialized"
-    /// sentinel value. Types that lazily allocate must track initialization by
-    /// some other means.
+    /// Note that the address of the returned pointer may potentially
+    /// be that of a valid pointer, which means this must not be used
+    /// as a "not yet initialized" sentinel value.
+    /// Types that lazily allocate must track initialization by some other means.
     ///
     /// # Examples
     ///
@@ -1357,6 +1357,28 @@ impl<T: PointeeSized> NonNull<T> {
     }
 }
 
+impl<T> NonNull<T> {
+    /// Casts from a type to its maybe-uninitialized version.
+    #[must_use]
+    #[inline(always)]
+    #[unstable(feature = "cast_maybe_uninit", issue = "145036")]
+    pub const fn cast_uninit(self) -> NonNull<MaybeUninit<T>> {
+        self.cast()
+    }
+}
+impl<T> NonNull<MaybeUninit<T>> {
+    /// Casts from a maybe-uninitialized type to its initialized version.
+    ///
+    /// This is always safe, since UB can only occur if the pointer is read
+    /// before being initialized.
+    #[must_use]
+    #[inline(always)]
+    #[unstable(feature = "cast_maybe_uninit", issue = "145036")]
+    pub const fn cast_init(self) -> NonNull<T> {
+        self.cast()
+    }
+}
+
 impl<T> NonNull<[T]> {
     /// Creates a non-null raw slice from a thin pointer and a length.
     ///
@@ -1601,7 +1623,7 @@ impl<T> NonNull<[T]> {
     #[inline]
     pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> NonNull<I::Output>
     where
-        I: ~const SliceIndex<[T]>,
+        I: [const] SliceIndex<[T]>,
     {
         // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
         // As a consequence, the resulting pointer cannot be null.
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index e9e13f9e97f..4302c1b1e44 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -63,10 +63,10 @@ impl<T: Sized> Unique<T> {
     /// This is useful for initializing types which lazily allocate, like
     /// `Vec::new` does.
     ///
-    /// Note that the pointer value may potentially represent a valid pointer to
-    /// a `T`, which means this must not be used as a "not yet initialized"
-    /// sentinel value. Types that lazily allocate must track initialization by
-    /// some other means.
+    /// Note that the address of the returned pointer may potentially
+    /// be that of a valid pointer, which means this must not be used
+    /// as a "not yet initialized" sentinel value.
+    /// Types that lazily allocate must track initialization by some other means.
     #[must_use]
     #[inline]
     pub const fn dangling() -> Self {
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 474f86395ae..6148bdb866a 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -610,9 +610,9 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn is_ok_and<F>(self, f: F) -> bool
     where
-        F: ~const FnOnce(T) -> bool + ~const Destruct,
-        T: ~const Destruct,
-        E: ~const Destruct,
+        F: [const] FnOnce(T) -> bool + [const] Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
     {
         match self {
             Err(_) => false,
@@ -665,9 +665,9 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn is_err_and<F>(self, f: F) -> bool
     where
-        F: ~const FnOnce(E) -> bool + ~const Destruct,
-        E: ~const Destruct,
-        T: ~const Destruct,
+        F: [const] FnOnce(E) -> bool + [const] Destruct,
+        E: [const] Destruct,
+        T: [const] Destruct,
     {
         match self {
             Ok(_) => false,
@@ -699,8 +699,8 @@ impl<T, E> Result<T, E> {
     #[rustc_diagnostic_item = "result_ok_method"]
     pub const fn ok(self) -> Option<T>
     where
-        T: ~const Destruct,
-        E: ~const Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
     {
         match self {
             Ok(x) => Some(x),
@@ -727,8 +727,8 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn err(self) -> Option<E>
     where
-        T: ~const Destruct,
-        E: ~const Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
     {
         match self {
             Ok(_) => None,
@@ -822,7 +822,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn map<U, F>(self, op: F) -> Result<U, E>
     where
-        F: ~const FnOnce(T) -> U + ~const Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
     {
         match self {
             Ok(t) => Ok(op(t)),
@@ -854,10 +854,10 @@ impl<T, E> Result<T, E> {
     #[must_use = "if you don't need the returned value, use `if let` instead"]
     pub const fn map_or<U, F>(self, default: U, f: F) -> U
     where
-        F: ~const FnOnce(T) -> U + ~const Destruct,
-        T: ~const Destruct,
-        E: ~const Destruct,
-        U: ~const Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
+        U: [const] Destruct,
     {
         match self {
             Ok(t) => f(t),
@@ -888,8 +888,8 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
     where
-        D: ~const FnOnce(E) -> U + ~const Destruct,
-        F: ~const FnOnce(T) -> U + ~const Destruct,
+        D: [const] FnOnce(E) -> U + [const] Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
     {
         match self {
             Ok(t) => f(t),
@@ -919,10 +919,10 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn map_or_default<U, F>(self, f: F) -> U
     where
-        F: ~const FnOnce(T) -> U + ~const Destruct,
-        U: ~const Default,
-        T: ~const Destruct,
-        E: ~const Destruct,
+        F: [const] FnOnce(T) -> U + [const] Destruct,
+        U: [const] Default,
+        T: [const] Destruct,
+        E: [const] Destruct,
     {
         match self {
             Ok(t) => f(t),
@@ -953,7 +953,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn map_err<F, O>(self, op: O) -> Result<T, F>
     where
-        O: ~const FnOnce(E) -> F + ~const Destruct,
+        O: [const] FnOnce(E) -> F + [const] Destruct,
     {
         match self {
             Ok(t) => Ok(t),
@@ -979,7 +979,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn inspect<F>(self, f: F) -> Self
     where
-        F: ~const FnOnce(&T) + ~const Destruct,
+        F: [const] FnOnce(&T) + [const] Destruct,
     {
         if let Ok(ref t) = self {
             f(t);
@@ -1007,7 +1007,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn inspect_err<F>(self, f: F) -> Self
     where
-        F: ~const FnOnce(&E) + ~const Destruct,
+        F: [const] FnOnce(&E) + [const] Destruct,
     {
         if let Err(ref e) = self {
             f(e);
@@ -1254,8 +1254,8 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn unwrap_or_default(self) -> T
     where
-        T: ~const Default + ~const Destruct,
-        E: ~const Destruct,
+        T: [const] Default + [const] Destruct,
+        E: [const] Destruct,
     {
         match self {
             Ok(x) => x,
@@ -1350,7 +1350,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_try", issue = "74935")]
     pub const fn into_ok(self) -> T
     where
-        E: ~const Into<!>,
+        E: [const] Into<!>,
     {
         match self {
             Ok(x) => x,
@@ -1387,7 +1387,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_try", issue = "74935")]
     pub const fn into_err(self) -> E
     where
-        T: ~const Into<!>,
+        T: [const] Into<!>,
     {
         match self {
             Ok(x) => x.into(),
@@ -1431,9 +1431,9 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn and<U>(self, res: Result<U, E>) -> Result<U, E>
     where
-        T: ~const Destruct,
-        E: ~const Destruct,
-        U: ~const Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
+        U: [const] Destruct,
     {
         match self {
             Ok(_) => res,
@@ -1477,7 +1477,7 @@ impl<T, E> Result<T, E> {
     #[rustc_confusables("flat_map", "flatmap")]
     pub const fn and_then<U, F>(self, op: F) -> Result<U, E>
     where
-        F: ~const FnOnce(T) -> Result<U, E> + ~const Destruct,
+        F: [const] FnOnce(T) -> Result<U, E> + [const] Destruct,
     {
         match self {
             Ok(t) => op(t),
@@ -1517,9 +1517,9 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn or<F>(self, res: Result<T, F>) -> Result<T, F>
     where
-        T: ~const Destruct,
-        E: ~const Destruct,
-        F: ~const Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
+        F: [const] Destruct,
     {
         match self {
             Ok(v) => Ok(v),
@@ -1548,7 +1548,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn or_else<F, O>(self, op: O) -> Result<T, F>
     where
-        O: ~const FnOnce(E) -> Result<T, F> + ~const Destruct,
+        O: [const] FnOnce(E) -> Result<T, F> + [const] Destruct,
     {
         match self {
             Ok(t) => Ok(t),
@@ -1579,8 +1579,8 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn unwrap_or(self, default: T) -> T
     where
-        T: ~const Destruct,
-        E: ~const Destruct,
+        T: [const] Destruct,
+        E: [const] Destruct,
     {
         match self {
             Ok(t) => t,
@@ -1605,7 +1605,7 @@ impl<T, E> Result<T, E> {
     #[rustc_const_unstable(feature = "const_result_trait_fn", issue = "144211")]
     pub const fn unwrap_or_else<F>(self, op: F) -> T
     where
-        F: ~const FnOnce(E) -> T + ~const Destruct,
+        F: [const] FnOnce(E) -> T + [const] Destruct,
     {
         match self {
             Ok(t) => t,
@@ -2164,7 +2164,7 @@ impl<T, E> const ops::Try for Result<T, E> {
 
 #[unstable(feature = "try_trait_v2", issue = "84277", old_name = "try_trait")]
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
-impl<T, E, F: ~const From<E>> const ops::FromResidual<Result<convert::Infallible, E>>
+impl<T, E, F: [const] From<E>> const ops::FromResidual<Result<convert::Infallible, E>>
     for Result<T, F>
 {
     #[inline]
@@ -2178,7 +2178,7 @@ impl<T, E, F: ~const From<E>> const ops::FromResidual<Result<convert::Infallible
 #[diagnostic::do_not_recommend]
 #[unstable(feature = "try_trait_v2_yeet", issue = "96374")]
 #[rustc_const_unstable(feature = "const_try", issue = "74935")]
-impl<T, E, F: ~const From<E>> const ops::FromResidual<ops::Yeet<E>> for Result<T, F> {
+impl<T, E, F: [const] From<E>> const ops::FromResidual<ops::Yeet<E>> for Result<T, F> {
     #[inline]
     fn from_residual(ops::Yeet(e): ops::Yeet<E>) -> Self {
         Err(From::from(e))
diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs
index 1eda8bc1bec..68bd12aa7bf 100644
--- a/library/core/src/slice/cmp.rs
+++ b/library/core/src/slice/cmp.rs
@@ -11,7 +11,7 @@ use crate::ops::ControlFlow;
 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
 impl<T, U> const PartialEq<[U]> for [T]
 where
-    T: ~const PartialEq<U>,
+    T: [const] PartialEq<U>,
 {
     fn eq(&self, other: &[U]) -> bool {
         SlicePartialEq::equal(self, other)
@@ -109,7 +109,7 @@ trait SlicePartialEq<B> {
 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
 impl<A, B> const SlicePartialEq<B> for [A]
 where
-    A: ~const PartialEq<B>,
+    A: [const] PartialEq<B>,
 {
     default fn equal(&self, other: &[B]) -> bool {
         if self.len() != other.len() {
@@ -138,7 +138,7 @@ where
 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
 impl<A, B> const SlicePartialEq<B> for [A]
 where
-    A: ~const BytewiseEq<B>,
+    A: [const] BytewiseEq<B>,
 {
     fn equal(&self, other: &[B]) -> bool {
         if self.len() != other.len() {
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index 322b3580ede..ae360df80f6 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -9,7 +9,7 @@ use crate::{ops, range};
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 impl<T, I> const ops::Index<I> for [T]
 where
-    I: ~const SliceIndex<[T]>,
+    I: [const] SliceIndex<[T]>,
 {
     type Output = I::Output;
 
@@ -23,7 +23,7 @@ where
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 impl<T, I> const ops::IndexMut<I> for [T]
 where
-    I: ~const SliceIndex<[T]>,
+    I: [const] SliceIndex<[T]>,
 {
     #[inline(always)]
     fn index_mut(&mut self, index: I) -> &mut I::Output {
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 1dddc48e68e..64f5b5dd831 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -569,7 +569,7 @@ impl<T> [T] {
     #[rustc_const_unstable(feature = "const_index", issue = "143775")]
     pub const fn get<I>(&self, index: I) -> Option<&I::Output>
     where
-        I: ~const SliceIndex<Self>,
+        I: [const] SliceIndex<Self>,
     {
         index.get(self)
     }
@@ -596,7 +596,7 @@ impl<T> [T] {
     #[rustc_const_unstable(feature = "const_index", issue = "143775")]
     pub const fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
     where
-        I: ~const SliceIndex<Self>,
+        I: [const] SliceIndex<Self>,
     {
         index.get_mut(self)
     }
@@ -636,7 +636,7 @@ impl<T> [T] {
     #[rustc_const_unstable(feature = "const_index", issue = "143775")]
     pub const unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
     where
-        I: ~const SliceIndex<Self>,
+        I: [const] SliceIndex<Self>,
     {
         // SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
         // the slice is dereferenceable because `self` is a safe reference.
@@ -681,7 +681,7 @@ impl<T> [T] {
     #[rustc_const_unstable(feature = "const_index", issue = "143775")]
     pub const unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
     where
-        I: ~const SliceIndex<Self>,
+        I: [const] SliceIndex<Self>,
     {
         // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
         // the slice is dereferenceable because `self` is a safe reference.
@@ -969,7 +969,7 @@ impl<T> [T] {
     /// assert!(v == [3, 2, 1]);
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_slice_reverse", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_slice_reverse", since = "1.90.0")]
     #[inline]
     pub const fn reverse(&mut self) {
         let half_len = self.len() / 2;
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index c40af4de7e0..1b6e84175b9 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -603,7 +603,7 @@ impl str {
     #[stable(feature = "str_checked_slicing", since = "1.20.0")]
     #[rustc_const_unstable(feature = "const_index", issue = "143775")]
     #[inline]
-    pub const fn get<I: ~const SliceIndex<str>>(&self, i: I) -> Option<&I::Output> {
+    pub const fn get<I: [const] SliceIndex<str>>(&self, i: I) -> Option<&I::Output> {
         i.get(self)
     }
 
@@ -636,7 +636,7 @@ impl str {
     #[stable(feature = "str_checked_slicing", since = "1.20.0")]
     #[rustc_const_unstable(feature = "const_index", issue = "143775")]
     #[inline]
-    pub const fn get_mut<I: ~const SliceIndex<str>>(&mut self, i: I) -> Option<&mut I::Output> {
+    pub const fn get_mut<I: [const] SliceIndex<str>>(&mut self, i: I) -> Option<&mut I::Output> {
         i.get_mut(self)
     }
 
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index 1597d1c1fa8..dc88f35eca7 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -53,7 +53,7 @@ impl PartialOrd for str {
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 impl<I> const ops::Index<I> for str
 where
-    I: ~const SliceIndex<str>,
+    I: [const] SliceIndex<str>,
 {
     type Output = I::Output;
 
@@ -67,7 +67,7 @@ where
 #[rustc_const_unstable(feature = "const_index", issue = "143775")]
 impl<I> const ops::IndexMut<I> for str
 where
-    I: ~const SliceIndex<str>,
+    I: [const] SliceIndex<str>,
 {
     #[inline]
     fn index_mut(&mut self, index: I) -> &mut I::Output {
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 70c02ead358..44a6895f90a 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -2293,7 +2293,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_add(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_add(self.p.get(), val, order).cast() }
     }
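// --- Illustrative sketch (editorial note, not part of the patch), mirroring
// the reworked `fetch_byte_sub` doc example just below:
//
//     let mut arr = [0i64, 1];
//     let atom = AtomicPtr::<i64>::new(&raw mut arr[0]);
//     // Adding 8 bytes moves from `arr[0]` to `arr[1]`; the old value is returned.
//     assert_eq!(atom.fetch_byte_add(8, Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
//     assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[1]).addr());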
 
     /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
@@ -2318,9 +2318,10 @@ impl<T> AtomicPtr<T> {
     /// #![feature(strict_provenance_atomic_ptr)]
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
-    /// let atom = AtomicPtr::<i64>::new(core::ptr::without_provenance_mut(1));
-    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
-    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
+    /// let mut arr = [0i64, 1];
+    /// let atom = AtomicPtr::<i64>::new(&raw mut arr[1]);
+    /// assert_eq!(atom.fetch_byte_sub(8, Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
@@ -2328,7 +2329,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_sub(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_sub(self.p.get(), val, order).cast() }
     }
 
     /// Performs a bitwise "or" operation on the address of the current pointer,
@@ -2379,7 +2380,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_or(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_or(self.p.get(), val, order).cast() }
     }
 
     /// Performs a bitwise "and" operation on the address of the current
@@ -2429,7 +2430,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_and(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_and(self.p.get(), val, order).cast() }
     }
 
     /// Performs a bitwise "xor" operation on the address of the current
@@ -2477,7 +2478,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_xor(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_xor(self.p.get(), val, order).cast() }
     }
 
     /// Returns a mutable pointer to the underlying pointer.
@@ -3981,15 +3982,15 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_add`.
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_xadd::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_xadd::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_xadd::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_xadd::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_xadd::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -3998,15 +3999,15 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_xsub::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_xsub::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_xsub::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_xsub::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_xsub::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -4147,15 +4148,15 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_and`
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_and::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_and::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_and::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_and::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_and::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -4163,15 +4164,15 @@ unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_nand`
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_nand::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_nand::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_nand::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_nand::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_nand::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -4179,15 +4180,15 @@ unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_or`
     unsafe {
         match order {
-            SeqCst => intrinsics::atomic_or::<T, { AO::SeqCst }>(dst, val),
-            Acquire => intrinsics::atomic_or::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_or::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_or::<T, { AO::AcqRel }>(dst, val),
-            Relaxed => intrinsics::atomic_or::<T, { AO::Relaxed }>(dst, val),
+            SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
+            Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
+            Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
         }
     }
 }
@@ -4195,15 +4196,15 @@ unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_xor`
     unsafe {
         match order {
-            SeqCst => intrinsics::atomic_xor::<T, { AO::SeqCst }>(dst, val),
-            Acquire => intrinsics::atomic_xor::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_xor::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_xor::<T, { AO::AcqRel }>(dst, val),
-            Relaxed => intrinsics::atomic_xor::<T, { AO::Relaxed }>(dst, val),
+            SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
+            Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
+            Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
         }
     }
 }
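
With `atomic_add`, `atomic_sub`, and the bitwise helpers now generic over a separate operand type `U`, the `AtomicPtr` byte-offset and masking operations can pass their `usize` operand straight through instead of wrapping it in `core::ptr::without_provenance_mut`, so the stored pointer's provenance is never derived from an integer. A sketch of the user-facing behaviour, assuming a nightly compiler with the unstable `strict_provenance_atomic_ptr` feature:

```rust
#![feature(strict_provenance_atomic_ptr)]
use std::mem::size_of;
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut arr = [0i64, 1];
    let atom = AtomicPtr::<i64>::new(&raw mut arr[0]);

    // Offset forward by one element's worth of bytes; the return value is the
    // previous pointer, and the whole array's provenance is preserved.
    let old = atom.fetch_byte_add(size_of::<i64>(), Ordering::Relaxed);
    assert_eq!(old.addr(), (&raw const arr[0]).addr());
    assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[1]).addr());

    // And back down again, mirroring the updated `fetch_byte_sub` doc example.
    atom.fetch_byte_sub(size_of::<i64>(), Ordering::Relaxed);
    assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
}
```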
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
index 0fb5c0bac75..0cc570f4b73 100644
--- a/library/core/src/time.rs
+++ b/library/core/src/time.rs
@@ -373,7 +373,6 @@ impl Duration {
     /// # Examples
     ///
     /// ```
-    /// #![feature(duration_constructors_lite)]
     /// use std::time::Duration;
     ///
     /// let duration = Duration::from_hours(6);
@@ -381,7 +380,8 @@ impl Duration {
     /// assert_eq!(6 * 60 * 60, duration.as_secs());
     /// assert_eq!(0, duration.subsec_nanos());
     /// ```
-    #[unstable(feature = "duration_constructors_lite", issue = "140881")]
+    #[stable(feature = "duration_constructors_lite", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "duration_constructors_lite", since = "CURRENT_RUSTC_VERSION")]
     #[must_use]
     #[inline]
     pub const fn from_hours(hours: u64) -> Duration {
@@ -401,7 +401,6 @@ impl Duration {
     /// # Examples
     ///
     /// ```
-    /// #![feature(duration_constructors_lite)]
     /// use std::time::Duration;
     ///
     /// let duration = Duration::from_mins(10);
@@ -409,7 +408,8 @@ impl Duration {
     /// assert_eq!(10 * 60, duration.as_secs());
     /// assert_eq!(0, duration.subsec_nanos());
     /// ```
-    #[unstable(feature = "duration_constructors_lite", issue = "140881")]
+    #[stable(feature = "duration_constructors_lite", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "duration_constructors_lite", since = "CURRENT_RUSTC_VERSION")]
     #[must_use]
     #[inline]
     pub const fn from_mins(mins: u64) -> Duration {
@@ -1100,7 +1100,8 @@ impl Duration {
 }
 
 #[stable(feature = "duration", since = "1.3.0")]
-impl Add for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Add for Duration {
     type Output = Duration;
 
     #[inline]
@@ -1110,7 +1111,8 @@ impl Add for Duration {
 }
 
 #[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl AddAssign for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const AddAssign for Duration {
     #[inline]
     fn add_assign(&mut self, rhs: Duration) {
         *self = *self + rhs;
@@ -1118,7 +1120,8 @@ impl AddAssign for Duration {
 }
 
 #[stable(feature = "duration", since = "1.3.0")]
-impl Sub for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Sub for Duration {
     type Output = Duration;
 
     #[inline]
@@ -1128,7 +1131,8 @@ impl Sub for Duration {
 }
 
 #[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl SubAssign for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const SubAssign for Duration {
     #[inline]
     fn sub_assign(&mut self, rhs: Duration) {
         *self = *self - rhs;
@@ -1136,7 +1140,8 @@ impl SubAssign for Duration {
 }
 
 #[stable(feature = "duration", since = "1.3.0")]
-impl Mul<u32> for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Mul<u32> for Duration {
     type Output = Duration;
 
     #[inline]
@@ -1146,7 +1151,8 @@ impl Mul<u32> for Duration {
 }
 
 #[stable(feature = "symmetric_u32_duration_mul", since = "1.31.0")]
-impl Mul<Duration> for u32 {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Mul<Duration> for u32 {
     type Output = Duration;
 
     #[inline]
@@ -1156,7 +1162,8 @@ impl Mul<Duration> for u32 {
 }
 
 #[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl MulAssign<u32> for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const MulAssign<u32> for Duration {
     #[inline]
     fn mul_assign(&mut self, rhs: u32) {
         *self = *self * rhs;
@@ -1164,7 +1171,8 @@ impl MulAssign<u32> for Duration {
 }
 
 #[stable(feature = "duration", since = "1.3.0")]
-impl Div<u32> for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Div<u32> for Duration {
     type Output = Duration;
 
     #[inline]
@@ -1175,7 +1183,8 @@ impl Div<u32> for Duration {
 }
 
 #[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl DivAssign<u32> for Duration {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const DivAssign<u32> for Duration {
     #[inline]
     #[track_caller]
     fn div_assign(&mut self, rhs: u32) {
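
Two separable changes to `Duration` here: `from_hours` and `from_mins` lose their feature gate (the doc examples drop `#![feature(duration_constructors_lite)]` accordingly), and the arithmetic operator impls become `impl const` behind the unstable `const_ops` gate (tracking issue 143802). A sketch of what that enables, assuming a nightly toolchain for the const-operator part; the exact gating may still shift while `const_ops` is unstable:

```rust
#![feature(const_ops)]
use std::time::Duration;

// Stable constructors after this change; both are const fns.
const SHIFT: Duration = Duration::from_hours(8);
const BREAKS: Duration = Duration::from_mins(45);

// `Sub` and `Mul<u32>` are `impl const` above, so these fold at compile time
// on nightly with the feature enabled.
const WORKED: Duration = SHIFT - BREAKS;
const WEEK: Duration = WORKED * 5;

fn main() {
    assert_eq!(SHIFT.as_secs(), 8 * 60 * 60);
    assert_eq!(WORKED.as_secs(), 8 * 60 * 60 - 45 * 60);
    assert_eq!(WEEK.as_secs(), 5 * WORKED.as_secs());
}
```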
diff --git a/library/coretests/tests/char.rs b/library/coretests/tests/char.rs
index 153fb36925e..852f073bae1 100644
--- a/library/coretests/tests/char.rs
+++ b/library/coretests/tests/char.rs
@@ -21,7 +21,6 @@ fn test_convert() {
     assert!(char::try_from(0xFFFF_FFFF_u32).is_err());
 }
 
-/* FIXME(#110395)
 #[test]
 const fn test_convert_const() {
     assert!(u32::from('a') == 0x61);
@@ -31,7 +30,6 @@ const fn test_convert_const() {
     assert!(char::from(b'a') == 'a');
     assert!(char::from(b'\xFF') == '\u{FF}');
 }
-*/
 
 #[test]
 fn test_from_str() {
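
The FIXME(#110395) wrapper comes off `test_convert_const`, so the `char`/integer `From` conversions are exercised in a `const fn` again. Outside the in-tree test crate the same pattern needs nightly const-trait features (the exact gates are still moving), but the shape is simply:

```rust
// Sketch of the re-enabled test's pattern: conversions in const evaluation.
// Assumes a toolchain where these `From` impls are const-callable (nightly,
// const trait work tracked in #110395).
const fn ascii_code(c: char) -> u32 {
    u32::from(c)
}

const A: u32 = ascii_code('a');

fn main() {
    assert_eq!(A, 0x61);
    assert_eq!(char::from(b'\xFF'), '\u{FF}');
}
```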
diff --git a/library/coretests/tests/convert.rs b/library/coretests/tests/convert.rs
index f76dd277884..f1048f4cf09 100644
--- a/library/coretests/tests/convert.rs
+++ b/library/coretests/tests/convert.rs
@@ -1,4 +1,3 @@
-/* FIXME(#110395)
 #[test]
 fn convert() {
     const fn from(x: i32) -> i32 {
@@ -15,4 +14,3 @@ fn convert() {
     const BAR: Vec<String> = into(Vec::new());
     assert_eq!(BAR, Vec::<String>::new());
 }
-*/
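
Same story for `convert`: the test body (visible in the context above) wraps `i32::from` and `Vec::into` in `const fn`s and evaluates them in constants. A cut-down sketch of that pattern, again nightly-only outside the standard library's own test crate:

```rust
// Mirrors the re-enabled test: a const identity conversion through `From`.
// Assumes const-callable `From` impls (nightly, #110395); the feature gates
// needed in user code are an assumption here and may differ.
const fn from(x: i32) -> i32 {
    i32::from(x)
}

const FOO: i32 = from(42);

fn main() {
    assert_eq!(FOO, 42);
}
```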
diff --git a/library/coretests/tests/floats/f128.rs b/library/coretests/tests/floats/f128.rs
index 36d6a20a944..ac4a2066530 100644
--- a/library/coretests/tests/floats/f128.rs
+++ b/library/coretests/tests/floats/f128.rs
@@ -15,21 +15,6 @@ const TOL: f128 = 1e-12;
 /// signs.
 const TOL_PRECISE: f128 = 1e-28;
 
-/// Smallest number
-const TINY_BITS: u128 = 0x1;
-
-/// Next smallest number
-const TINY_UP_BITS: u128 = 0x2;
-
-/// Exponent = 0b11...10, Sifnificand 0b1111..10. Min val > 0
-const MAX_DOWN_BITS: u128 = 0x7ffefffffffffffffffffffffffffffe;
-
-/// Zeroed exponent, full significant
-const LARGEST_SUBNORMAL_BITS: u128 = 0x0000ffffffffffffffffffffffffffff;
-
-/// Exponent = 0b1, zeroed significand
-const SMALLEST_NORMAL_BITS: u128 = 0x00010000000000000000000000000000;
-
 /// First pattern over the mantissa
 const NAN_MASK1: u128 = 0x0000aaaaaaaaaaaaaaaaaaaaaaaaaaaa;
 
@@ -40,106 +25,6 @@ const NAN_MASK2: u128 = 0x00005555555555555555555555555555;
 // the intrinsics.
 
 #[test]
-#[cfg(any(miri, target_has_reliable_f128_math))]
-fn test_abs() {
-    assert_biteq!(f128::INFINITY.abs(), f128::INFINITY);
-    assert_biteq!(1f128.abs(), 1f128);
-    assert_biteq!(0f128.abs(), 0f128);
-    assert_biteq!((-0f128).abs(), 0f128);
-    assert_biteq!((-1f128).abs(), 1f128);
-    assert_biteq!(f128::NEG_INFINITY.abs(), f128::INFINITY);
-    assert_biteq!((1f128 / f128::NEG_INFINITY).abs(), 0f128);
-    assert!(f128::NAN.abs().is_nan());
-}
-
-#[test]
-fn test_is_sign_positive() {
-    assert!(f128::INFINITY.is_sign_positive());
-    assert!(1f128.is_sign_positive());
-    assert!(0f128.is_sign_positive());
-    assert!(!(-0f128).is_sign_positive());
-    assert!(!(-1f128).is_sign_positive());
-    assert!(!f128::NEG_INFINITY.is_sign_positive());
-    assert!(!(1f128 / f128::NEG_INFINITY).is_sign_positive());
-    assert!(f128::NAN.is_sign_positive());
-    assert!(!(-f128::NAN).is_sign_positive());
-}
-
-#[test]
-fn test_is_sign_negative() {
-    assert!(!f128::INFINITY.is_sign_negative());
-    assert!(!1f128.is_sign_negative());
-    assert!(!0f128.is_sign_negative());
-    assert!((-0f128).is_sign_negative());
-    assert!((-1f128).is_sign_negative());
-    assert!(f128::NEG_INFINITY.is_sign_negative());
-    assert!((1f128 / f128::NEG_INFINITY).is_sign_negative());
-    assert!(!f128::NAN.is_sign_negative());
-    assert!((-f128::NAN).is_sign_negative());
-}
-
-#[test]
-fn test_next_up() {
-    let tiny = f128::from_bits(TINY_BITS);
-    let tiny_up = f128::from_bits(TINY_UP_BITS);
-    let max_down = f128::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f128::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f128::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f128::NEG_INFINITY.next_up(), f128::MIN);
-    assert_biteq!(f128::MIN.next_up(), -max_down);
-    assert_biteq!((-1.0 - f128::EPSILON).next_up(), -1.0f128);
-    assert_biteq!((-smallest_normal).next_up(), -largest_subnormal);
-    assert_biteq!((-tiny_up).next_up(), -tiny);
-    assert_biteq!((-tiny).next_up(), -0.0f128);
-    assert_biteq!((-0.0f128).next_up(), tiny);
-    assert_biteq!(0.0f128.next_up(), tiny);
-    assert_biteq!(tiny.next_up(), tiny_up);
-    assert_biteq!(largest_subnormal.next_up(), smallest_normal);
-    assert_biteq!(1.0f128.next_up(), 1.0 + f128::EPSILON);
-    assert_biteq!(f128::MAX.next_up(), f128::INFINITY);
-    assert_biteq!(f128::INFINITY.next_up(), f128::INFINITY);
-
-    // Check that NaNs roundtrip.
-    let nan0 = f128::NAN;
-    let nan1 = f128::from_bits(f128::NAN.to_bits() ^ 0x002a_aaaa);
-    let nan2 = f128::from_bits(f128::NAN.to_bits() ^ 0x0055_5555);
-    assert_biteq!(nan0.next_up(), nan0);
-    assert_biteq!(nan1.next_up(), nan1);
-    assert_biteq!(nan2.next_up(), nan2);
-}
-
-#[test]
-fn test_next_down() {
-    let tiny = f128::from_bits(TINY_BITS);
-    let tiny_up = f128::from_bits(TINY_UP_BITS);
-    let max_down = f128::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f128::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f128::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f128::NEG_INFINITY.next_down(), f128::NEG_INFINITY);
-    assert_biteq!(f128::MIN.next_down(), f128::NEG_INFINITY);
-    assert_biteq!((-max_down).next_down(), f128::MIN);
-    assert_biteq!((-1.0f128).next_down(), -1.0 - f128::EPSILON);
-    assert_biteq!((-largest_subnormal).next_down(), -smallest_normal);
-    assert_biteq!((-tiny).next_down(), -tiny_up);
-    assert_biteq!((-0.0f128).next_down(), -tiny);
-    assert_biteq!((0.0f128).next_down(), -tiny);
-    assert_biteq!(tiny.next_down(), 0.0f128);
-    assert_biteq!(tiny_up.next_down(), tiny);
-    assert_biteq!(smallest_normal.next_down(), largest_subnormal);
-    assert_biteq!((1.0 + f128::EPSILON).next_down(), 1.0f128);
-    assert_biteq!(f128::MAX.next_down(), max_down);
-    assert_biteq!(f128::INFINITY.next_down(), f128::MAX);
-
-    // Check that NaNs roundtrip.
-    let nan0 = f128::NAN;
-    let nan1 = f128::from_bits(f128::NAN.to_bits() ^ 0x002a_aaaa);
-    let nan2 = f128::from_bits(f128::NAN.to_bits() ^ 0x0055_5555);
-    assert_biteq!(nan0.next_down(), nan0);
-    assert_biteq!(nan1.next_down(), nan1);
-    assert_biteq!(nan2.next_down(), nan2);
-}
-
-#[test]
 #[cfg(not(miri))]
 #[cfg(target_has_reliable_f128_math)]
 fn test_mul_add() {
@@ -194,19 +79,6 @@ fn test_powi() {
 }
 
 #[test]
-#[cfg(not(miri))]
-#[cfg(target_has_reliable_f128_math)]
-fn test_sqrt_domain() {
-    assert!(f128::NAN.sqrt().is_nan());
-    assert!(f128::NEG_INFINITY.sqrt().is_nan());
-    assert!((-1.0f128).sqrt().is_nan());
-    assert_biteq!((-0.0f128).sqrt(), -0.0);
-    assert_biteq!(0.0f128.sqrt(), 0.0);
-    assert_biteq!(1.0f128.sqrt(), 1.0);
-    assert_biteq!(f128::INFINITY.sqrt(), f128::INFINITY);
-}
-
-#[test]
 fn test_to_degrees() {
     let pi: f128 = consts::PI;
     let nan: f128 = f128::NAN;
@@ -261,168 +133,6 @@ fn test_float_bits_conv() {
 }
 
 #[test]
-#[should_panic]
-fn test_clamp_min_greater_than_max() {
-    let _ = 1.0f128.clamp(3.0, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_min_is_nan() {
-    let _ = 1.0f128.clamp(f128::NAN, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_max_is_nan() {
-    let _ = 1.0f128.clamp(3.0, f128::NAN);
-}
-
-#[test]
-fn test_total_cmp() {
-    use core::cmp::Ordering;
-
-    fn quiet_bit_mask() -> u128 {
-        1 << (f128::MANTISSA_DIGITS - 2)
-    }
-
-    // FIXME(f16_f128): test subnormals when powf is available
-    // fn min_subnorm() -> f128 {
-    //     f128::MIN_POSITIVE / f128::powf(2.0, f128::MANTISSA_DIGITS as f128 - 1.0)
-    // }
-
-    // fn max_subnorm() -> f128 {
-    //     f128::MIN_POSITIVE - min_subnorm()
-    // }
-
-    fn q_nan() -> f128 {
-        f128::from_bits(f128::NAN.to_bits() | quiet_bit_mask())
-    }
-
-    fn s_nan() -> f128 {
-        f128::from_bits((f128::NAN.to_bits() & !quiet_bit_mask()) + 42)
-    }
-
-    assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
-    assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Equal, (-f128::INFINITY).total_cmp(&-f128::INFINITY));
-    assert_eq!(Ordering::Equal, (-f128::MAX).total_cmp(&-f128::MAX));
-    assert_eq!(Ordering::Equal, (-2.5_f128).total_cmp(&-2.5));
-    assert_eq!(Ordering::Equal, (-1.0_f128).total_cmp(&-1.0));
-    assert_eq!(Ordering::Equal, (-1.5_f128).total_cmp(&-1.5));
-    assert_eq!(Ordering::Equal, (-0.5_f128).total_cmp(&-0.5));
-    assert_eq!(Ordering::Equal, (-f128::MIN_POSITIVE).total_cmp(&-f128::MIN_POSITIVE));
-    // assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
-    // assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Equal, (-0.0_f128).total_cmp(&-0.0));
-    assert_eq!(Ordering::Equal, 0.0_f128.total_cmp(&0.0));
-    // assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
-    // assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Equal, f128::MIN_POSITIVE.total_cmp(&f128::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, 0.5_f128.total_cmp(&0.5));
-    assert_eq!(Ordering::Equal, 1.0_f128.total_cmp(&1.0));
-    assert_eq!(Ordering::Equal, 1.5_f128.total_cmp(&1.5));
-    assert_eq!(Ordering::Equal, 2.5_f128.total_cmp(&2.5));
-    assert_eq!(Ordering::Equal, f128::MAX.total_cmp(&f128::MAX));
-    assert_eq!(Ordering::Equal, f128::INFINITY.total_cmp(&f128::INFINITY));
-    assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
-    assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
-
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::INFINITY));
-    assert_eq!(Ordering::Less, (-f128::INFINITY).total_cmp(&-f128::MAX));
-    assert_eq!(Ordering::Less, (-f128::MAX).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-2.5_f128).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-1.5_f128).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-1.0_f128).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-0.5_f128).total_cmp(&-f128::MIN_POSITIVE));
-    // assert_eq!(Ordering::Less, (-f128::MIN_POSITIVE).total_cmp(&-max_subnorm()));
-    // assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
-    // assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-0.0_f128).total_cmp(&0.0));
-    // assert_eq!(Ordering::Less, 0.0_f128.total_cmp(&min_subnorm()));
-    // assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
-    // assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f128::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, f128::MIN_POSITIVE.total_cmp(&0.5));
-    assert_eq!(Ordering::Less, 0.5_f128.total_cmp(&1.0));
-    assert_eq!(Ordering::Less, 1.0_f128.total_cmp(&1.5));
-    assert_eq!(Ordering::Less, 1.5_f128.total_cmp(&2.5));
-    assert_eq!(Ordering::Less, 2.5_f128.total_cmp(&f128::MAX));
-    assert_eq!(Ordering::Less, f128::MAX.total_cmp(&f128::INFINITY));
-    assert_eq!(Ordering::Less, f128::INFINITY.total_cmp(&s_nan()));
-    assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
-
-    assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
-    assert_eq!(Ordering::Greater, (-f128::INFINITY).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Greater, (-f128::MAX).total_cmp(&-f128::INFINITY));
-    assert_eq!(Ordering::Greater, (-2.5_f128).total_cmp(&-f128::MAX));
-    assert_eq!(Ordering::Greater, (-1.5_f128).total_cmp(&-2.5));
-    assert_eq!(Ordering::Greater, (-1.0_f128).total_cmp(&-1.5));
-    assert_eq!(Ordering::Greater, (-0.5_f128).total_cmp(&-1.0));
-    assert_eq!(Ordering::Greater, (-f128::MIN_POSITIVE).total_cmp(&-0.5));
-    // assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f128::MIN_POSITIVE));
-    // assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
-    // assert_eq!(Ordering::Greater, (-0.0_f128).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Greater, 0.0_f128.total_cmp(&-0.0));
-    // assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
-    // assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
-    // assert_eq!(Ordering::Greater, f128::MIN_POSITIVE.total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Greater, 0.5_f128.total_cmp(&f128::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, 1.0_f128.total_cmp(&0.5));
-    assert_eq!(Ordering::Greater, 1.5_f128.total_cmp(&1.0));
-    assert_eq!(Ordering::Greater, 2.5_f128.total_cmp(&1.5));
-    assert_eq!(Ordering::Greater, f128::MAX.total_cmp(&2.5));
-    assert_eq!(Ordering::Greater, f128::INFINITY.total_cmp(&f128::MAX));
-    assert_eq!(Ordering::Greater, s_nan().total_cmp(&f128::INFINITY));
-    assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
-
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f128::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f128::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f128::MIN_POSITIVE));
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f128::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f128::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f128::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
-
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::INFINITY));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::MAX));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::MIN_POSITIVE));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f128::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f128::MAX));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f128::INFINITY));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
-}
-
-#[test]
 fn test_algebraic() {
     let a: f128 = 123.0;
     let b: f128 = 456.0;
diff --git a/library/coretests/tests/floats/f16.rs b/library/coretests/tests/floats/f16.rs
index 351c008a37b..bb9c8a002fe 100644
--- a/library/coretests/tests/floats/f16.rs
+++ b/library/coretests/tests/floats/f16.rs
@@ -21,21 +21,6 @@ const TOL_P2: f16 = 0.5;
 #[allow(unused)]
 const TOL_P4: f16 = 10.0;
 
-/// Smallest number
-const TINY_BITS: u16 = 0x1;
-
-/// Next smallest number
-const TINY_UP_BITS: u16 = 0x2;
-
-/// Exponent = 0b11...10, Sifnificand 0b1111..10. Min val > 0
-const MAX_DOWN_BITS: u16 = 0x7bfe;
-
-/// Zeroed exponent, full significant
-const LARGEST_SUBNORMAL_BITS: u16 = 0x03ff;
-
-/// Exponent = 0b1, zeroed significand
-const SMALLEST_NORMAL_BITS: u16 = 0x0400;
-
 /// First pattern over the mantissa
 const NAN_MASK1: u16 = 0x02aa;
 
@@ -46,106 +31,6 @@ const NAN_MASK2: u16 = 0x0155;
 // the intrinsics.
 
 #[test]
-#[cfg(any(miri, target_has_reliable_f16_math))]
-fn test_abs() {
-    assert_biteq!(f16::INFINITY.abs(), f16::INFINITY);
-    assert_biteq!(1f16.abs(), 1f16);
-    assert_biteq!(0f16.abs(), 0f16);
-    assert_biteq!((-0f16).abs(), 0f16);
-    assert_biteq!((-1f16).abs(), 1f16);
-    assert_biteq!(f16::NEG_INFINITY.abs(), f16::INFINITY);
-    assert_biteq!((1f16 / f16::NEG_INFINITY).abs(), 0f16);
-    assert!(f16::NAN.abs().is_nan());
-}
-
-#[test]
-fn test_is_sign_positive() {
-    assert!(f16::INFINITY.is_sign_positive());
-    assert!(1f16.is_sign_positive());
-    assert!(0f16.is_sign_positive());
-    assert!(!(-0f16).is_sign_positive());
-    assert!(!(-1f16).is_sign_positive());
-    assert!(!f16::NEG_INFINITY.is_sign_positive());
-    assert!(!(1f16 / f16::NEG_INFINITY).is_sign_positive());
-    assert!(f16::NAN.is_sign_positive());
-    assert!(!(-f16::NAN).is_sign_positive());
-}
-
-#[test]
-fn test_is_sign_negative() {
-    assert!(!f16::INFINITY.is_sign_negative());
-    assert!(!1f16.is_sign_negative());
-    assert!(!0f16.is_sign_negative());
-    assert!((-0f16).is_sign_negative());
-    assert!((-1f16).is_sign_negative());
-    assert!(f16::NEG_INFINITY.is_sign_negative());
-    assert!((1f16 / f16::NEG_INFINITY).is_sign_negative());
-    assert!(!f16::NAN.is_sign_negative());
-    assert!((-f16::NAN).is_sign_negative());
-}
-
-#[test]
-fn test_next_up() {
-    let tiny = f16::from_bits(TINY_BITS);
-    let tiny_up = f16::from_bits(TINY_UP_BITS);
-    let max_down = f16::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f16::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f16::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f16::NEG_INFINITY.next_up(), f16::MIN);
-    assert_biteq!(f16::MIN.next_up(), -max_down);
-    assert_biteq!((-1.0 - f16::EPSILON).next_up(), -1.0f16);
-    assert_biteq!((-smallest_normal).next_up(), -largest_subnormal);
-    assert_biteq!((-tiny_up).next_up(), -tiny);
-    assert_biteq!((-tiny).next_up(), -0.0f16);
-    assert_biteq!((-0.0f16).next_up(), tiny);
-    assert_biteq!(0.0f16.next_up(), tiny);
-    assert_biteq!(tiny.next_up(), tiny_up);
-    assert_biteq!(largest_subnormal.next_up(), smallest_normal);
-    assert_biteq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
-    assert_biteq!(f16::MAX.next_up(), f16::INFINITY);
-    assert_biteq!(f16::INFINITY.next_up(), f16::INFINITY);
-
-    // Check that NaNs roundtrip.
-    let nan0 = f16::NAN;
-    let nan1 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK1);
-    let nan2 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK2);
-    assert_biteq!(nan0.next_up(), nan0);
-    assert_biteq!(nan1.next_up(), nan1);
-    assert_biteq!(nan2.next_up(), nan2);
-}
-
-#[test]
-fn test_next_down() {
-    let tiny = f16::from_bits(TINY_BITS);
-    let tiny_up = f16::from_bits(TINY_UP_BITS);
-    let max_down = f16::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f16::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f16::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f16::NEG_INFINITY.next_down(), f16::NEG_INFINITY);
-    assert_biteq!(f16::MIN.next_down(), f16::NEG_INFINITY);
-    assert_biteq!((-max_down).next_down(), f16::MIN);
-    assert_biteq!((-1.0f16).next_down(), -1.0 - f16::EPSILON);
-    assert_biteq!((-largest_subnormal).next_down(), -smallest_normal);
-    assert_biteq!((-tiny).next_down(), -tiny_up);
-    assert_biteq!((-0.0f16).next_down(), -tiny);
-    assert_biteq!((0.0f16).next_down(), -tiny);
-    assert_biteq!(tiny.next_down(), 0.0f16);
-    assert_biteq!(tiny_up.next_down(), tiny);
-    assert_biteq!(smallest_normal.next_down(), largest_subnormal);
-    assert_biteq!((1.0 + f16::EPSILON).next_down(), 1.0f16);
-    assert_biteq!(f16::MAX.next_down(), max_down);
-    assert_biteq!(f16::INFINITY.next_down(), f16::MAX);
-
-    // Check that NaNs roundtrip.
-    let nan0 = f16::NAN;
-    let nan1 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK1);
-    let nan2 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK2);
-    assert_biteq!(nan0.next_down(), nan0);
-    assert_biteq!(nan1.next_down(), nan1);
-    assert_biteq!(nan2.next_down(), nan2);
-}
-
-#[test]
 #[cfg(not(miri))]
 #[cfg(target_has_reliable_f16_math)]
 fn test_mul_add() {
@@ -196,19 +81,6 @@ fn test_powi() {
 }
 
 #[test]
-#[cfg(not(miri))]
-#[cfg(target_has_reliable_f16_math)]
-fn test_sqrt_domain() {
-    assert!(f16::NAN.sqrt().is_nan());
-    assert!(f16::NEG_INFINITY.sqrt().is_nan());
-    assert!((-1.0f16).sqrt().is_nan());
-    assert_biteq!((-0.0f16).sqrt(), -0.0);
-    assert_biteq!(0.0f16.sqrt(), 0.0);
-    assert_biteq!(1.0f16.sqrt(), 1.0);
-    assert_biteq!(f16::INFINITY.sqrt(), f16::INFINITY);
-}
-
-#[test]
 fn test_to_degrees() {
     let pi: f16 = consts::PI;
     let nan: f16 = f16::NAN;
@@ -260,172 +132,6 @@ fn test_float_bits_conv() {
 }
 
 #[test]
-#[should_panic]
-fn test_clamp_min_greater_than_max() {
-    let _ = 1.0f16.clamp(3.0, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_min_is_nan() {
-    let _ = 1.0f16.clamp(f16::NAN, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_max_is_nan() {
-    let _ = 1.0f16.clamp(3.0, f16::NAN);
-}
-
-#[test]
-#[cfg(not(miri))]
-#[cfg(target_has_reliable_f16_math)]
-fn test_total_cmp() {
-    use core::cmp::Ordering;
-
-    fn quiet_bit_mask() -> u16 {
-        1 << (f16::MANTISSA_DIGITS - 2)
-    }
-
-    fn min_subnorm() -> f16 {
-        f16::MIN_POSITIVE / f16::powf(2.0, f16::MANTISSA_DIGITS as f16 - 1.0)
-    }
-
-    fn max_subnorm() -> f16 {
-        f16::MIN_POSITIVE - min_subnorm()
-    }
-
-    fn q_nan() -> f16 {
-        f16::from_bits(f16::NAN.to_bits() | quiet_bit_mask())
-    }
-
-    // FIXME(f16_f128): Tests involving sNaN are disabled because without optimizations,
-    // `total_cmp` is getting incorrectly lowered to code that includes a `extend`/`trunc` round
-    // trip, which quiets sNaNs. See: https://github.com/llvm/llvm-project/issues/104915
-    // fn s_nan() -> f16 {
-    //     f16::from_bits((f16::NAN.to_bits() & !quiet_bit_mask()) + 42)
-    // }
-
-    assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
-    // assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Equal, (-f16::INFINITY).total_cmp(&-f16::INFINITY));
-    assert_eq!(Ordering::Equal, (-f16::MAX).total_cmp(&-f16::MAX));
-    assert_eq!(Ordering::Equal, (-2.5_f16).total_cmp(&-2.5));
-    assert_eq!(Ordering::Equal, (-1.0_f16).total_cmp(&-1.0));
-    assert_eq!(Ordering::Equal, (-1.5_f16).total_cmp(&-1.5));
-    assert_eq!(Ordering::Equal, (-0.5_f16).total_cmp(&-0.5));
-    assert_eq!(Ordering::Equal, (-f16::MIN_POSITIVE).total_cmp(&-f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Equal, (-0.0_f16).total_cmp(&-0.0));
-    assert_eq!(Ordering::Equal, 0.0_f16.total_cmp(&0.0));
-    assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Equal, f16::MIN_POSITIVE.total_cmp(&f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, 0.5_f16.total_cmp(&0.5));
-    assert_eq!(Ordering::Equal, 1.0_f16.total_cmp(&1.0));
-    assert_eq!(Ordering::Equal, 1.5_f16.total_cmp(&1.5));
-    assert_eq!(Ordering::Equal, 2.5_f16.total_cmp(&2.5));
-    assert_eq!(Ordering::Equal, f16::MAX.total_cmp(&f16::MAX));
-    assert_eq!(Ordering::Equal, f16::INFINITY.total_cmp(&f16::INFINITY));
-    // assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
-    assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
-
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::INFINITY));
-    assert_eq!(Ordering::Less, (-f16::INFINITY).total_cmp(&-f16::MAX));
-    assert_eq!(Ordering::Less, (-f16::MAX).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-2.5_f16).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-1.5_f16).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-1.0_f16).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-0.5_f16).total_cmp(&-f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-f16::MIN_POSITIVE).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-0.0_f16).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, 0.0_f16.total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, f16::MIN_POSITIVE.total_cmp(&0.5));
-    assert_eq!(Ordering::Less, 0.5_f16.total_cmp(&1.0));
-    assert_eq!(Ordering::Less, 1.0_f16.total_cmp(&1.5));
-    assert_eq!(Ordering::Less, 1.5_f16.total_cmp(&2.5));
-    assert_eq!(Ordering::Less, 2.5_f16.total_cmp(&f16::MAX));
-    assert_eq!(Ordering::Less, f16::MAX.total_cmp(&f16::INFINITY));
-    // assert_eq!(Ordering::Less, f16::INFINITY.total_cmp(&s_nan()));
-    // assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
-
-    // assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
-    // assert_eq!(Ordering::Greater, (-f16::INFINITY).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Greater, (-f16::MAX).total_cmp(&-f16::INFINITY));
-    assert_eq!(Ordering::Greater, (-2.5_f16).total_cmp(&-f16::MAX));
-    assert_eq!(Ordering::Greater, (-1.5_f16).total_cmp(&-2.5));
-    assert_eq!(Ordering::Greater, (-1.0_f16).total_cmp(&-1.5));
-    assert_eq!(Ordering::Greater, (-0.5_f16).total_cmp(&-1.0));
-    assert_eq!(Ordering::Greater, (-f16::MIN_POSITIVE).total_cmp(&-0.5));
-    assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Greater, (-0.0_f16).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Greater, 0.0_f16.total_cmp(&-0.0));
-    assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
-    assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Greater, f16::MIN_POSITIVE.total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Greater, 0.5_f16.total_cmp(&f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, 1.0_f16.total_cmp(&0.5));
-    assert_eq!(Ordering::Greater, 1.5_f16.total_cmp(&1.0));
-    assert_eq!(Ordering::Greater, 2.5_f16.total_cmp(&1.5));
-    assert_eq!(Ordering::Greater, f16::MAX.total_cmp(&2.5));
-    assert_eq!(Ordering::Greater, f16::INFINITY.total_cmp(&f16::MAX));
-    // assert_eq!(Ordering::Greater, s_nan().total_cmp(&f16::INFINITY));
-    // assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
-
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f16::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f16::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f16::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f16::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f16::INFINITY));
-    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
-
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::INFINITY));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::MAX));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::MIN_POSITIVE));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f16::MIN_POSITIVE));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f16::MAX));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f16::INFINITY));
-    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
-}
-
-#[test]
 fn test_algebraic() {
     let a: f16 = 123.0;
     let b: f16 = 456.0;
diff --git a/library/coretests/tests/floats/f32.rs b/library/coretests/tests/floats/f32.rs
index 267b0e4e294..e77e44655dc 100644
--- a/library/coretests/tests/floats/f32.rs
+++ b/library/coretests/tests/floats/f32.rs
@@ -3,21 +3,6 @@ use core::f32::consts;
 
 use super::{assert_approx_eq, assert_biteq};
 
-/// Smallest number
-const TINY_BITS: u32 = 0x1;
-
-/// Next smallest number
-const TINY_UP_BITS: u32 = 0x2;
-
-/// Exponent = 0b11...10, Sifnificand 0b1111..10. Min val > 0
-const MAX_DOWN_BITS: u32 = 0x7f7f_fffe;
-
-/// Zeroed exponent, full significant
-const LARGEST_SUBNORMAL_BITS: u32 = 0x007f_ffff;
-
-/// Exponent = 0b1, zeroed significand
-const SMALLEST_NORMAL_BITS: u32 = 0x0080_0000;
-
 /// First pattern over the mantissa
 const NAN_MASK1: u32 = 0x002a_aaaa;
 
@@ -29,117 +14,6 @@ const NAN_MASK2: u32 = 0x0055_5555;
 /// They serve as a way to get an idea of the real precision of floating point operations on different platforms.
 const APPROX_DELTA: f32 = if cfg!(miri) { 1e-4 } else { 1e-6 };
 
-#[test]
-fn test_abs() {
-    assert_biteq!(f32::INFINITY.abs(), f32::INFINITY);
-    assert_biteq!(1f32.abs(), 1f32);
-    assert_biteq!(0f32.abs(), 0f32);
-    assert_biteq!((-0f32).abs(), 0f32);
-    assert_biteq!((-1f32).abs(), 1f32);
-    assert_biteq!(f32::NEG_INFINITY.abs(), f32::INFINITY);
-    assert_biteq!((1f32 / f32::NEG_INFINITY).abs(), 0f32);
-    assert!(f32::NAN.abs().is_nan());
-}
-
-#[test]
-fn test_signum() {
-    assert_biteq!(f32::INFINITY.signum(), 1f32);
-    assert_biteq!(1f32.signum(), 1f32);
-    assert_biteq!(0f32.signum(), 1f32);
-    assert_biteq!((-0f32).signum(), -1f32);
-    assert_biteq!((-1f32).signum(), -1f32);
-    assert_biteq!(f32::NEG_INFINITY.signum(), -1f32);
-    assert_biteq!((1f32 / f32::NEG_INFINITY).signum(), -1f32);
-    assert!(f32::NAN.signum().is_nan());
-}
-
-#[test]
-fn test_is_sign_positive() {
-    assert!(f32::INFINITY.is_sign_positive());
-    assert!(1f32.is_sign_positive());
-    assert!(0f32.is_sign_positive());
-    assert!(!(-0f32).is_sign_positive());
-    assert!(!(-1f32).is_sign_positive());
-    assert!(!f32::NEG_INFINITY.is_sign_positive());
-    assert!(!(1f32 / f32::NEG_INFINITY).is_sign_positive());
-    assert!(f32::NAN.is_sign_positive());
-    assert!(!(-f32::NAN).is_sign_positive());
-}
-
-#[test]
-fn test_is_sign_negative() {
-    assert!(!f32::INFINITY.is_sign_negative());
-    assert!(!1f32.is_sign_negative());
-    assert!(!0f32.is_sign_negative());
-    assert!((-0f32).is_sign_negative());
-    assert!((-1f32).is_sign_negative());
-    assert!(f32::NEG_INFINITY.is_sign_negative());
-    assert!((1f32 / f32::NEG_INFINITY).is_sign_negative());
-    assert!(!f32::NAN.is_sign_negative());
-    assert!((-f32::NAN).is_sign_negative());
-}
-
-#[test]
-fn test_next_up() {
-    let tiny = f32::from_bits(TINY_BITS);
-    let tiny_up = f32::from_bits(TINY_UP_BITS);
-    let max_down = f32::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f32::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f32::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f32::NEG_INFINITY.next_up(), f32::MIN);
-    assert_biteq!(f32::MIN.next_up(), -max_down);
-    assert_biteq!((-1.0f32 - f32::EPSILON).next_up(), -1.0f32);
-    assert_biteq!((-smallest_normal).next_up(), -largest_subnormal);
-    assert_biteq!((-tiny_up).next_up(), -tiny);
-    assert_biteq!((-tiny).next_up(), -0.0f32);
-    assert_biteq!((-0.0f32).next_up(), tiny);
-    assert_biteq!(0.0f32.next_up(), tiny);
-    assert_biteq!(tiny.next_up(), tiny_up);
-    assert_biteq!(largest_subnormal.next_up(), smallest_normal);
-    assert_biteq!(1.0f32.next_up(), 1.0 + f32::EPSILON);
-    assert_biteq!(f32::MAX.next_up(), f32::INFINITY);
-    assert_biteq!(f32::INFINITY.next_up(), f32::INFINITY);
-
-    // Check that NaNs roundtrip.
-    let nan0 = f32::NAN;
-    let nan1 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK1);
-    let nan2 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK2);
-    assert_biteq!(nan0.next_up(), nan0);
-    assert_biteq!(nan1.next_up(), nan1);
-    assert_biteq!(nan2.next_up(), nan2);
-}
-
-#[test]
-fn test_next_down() {
-    let tiny = f32::from_bits(TINY_BITS);
-    let tiny_up = f32::from_bits(TINY_UP_BITS);
-    let max_down = f32::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f32::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f32::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f32::NEG_INFINITY.next_down(), f32::NEG_INFINITY);
-    assert_biteq!(f32::MIN.next_down(), f32::NEG_INFINITY);
-    assert_biteq!((-max_down).next_down(), f32::MIN);
-    assert_biteq!((-1.0f32).next_down(), -1.0 - f32::EPSILON);
-    assert_biteq!((-largest_subnormal).next_down(), -smallest_normal);
-    assert_biteq!((-tiny).next_down(), -tiny_up);
-    assert_biteq!((-0.0f32).next_down(), -tiny);
-    assert_biteq!((0.0f32).next_down(), -tiny);
-    assert_biteq!(tiny.next_down(), 0.0f32);
-    assert_biteq!(tiny_up.next_down(), tiny);
-    assert_biteq!(smallest_normal.next_down(), largest_subnormal);
-    assert_biteq!((1.0 + f32::EPSILON).next_down(), 1.0f32);
-    assert_biteq!(f32::MAX.next_down(), max_down);
-    assert_biteq!(f32::INFINITY.next_down(), f32::MAX);
-
-    // Check that NaNs roundtrip.
-    let nan0 = f32::NAN;
-    let nan1 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK1);
-    let nan2 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK2);
-    assert_biteq!(nan0.next_down(), nan0);
-    assert_biteq!(nan1.next_down(), nan1);
-    assert_biteq!(nan2.next_down(), nan2);
-}
-
 // FIXME(#140515): mingw has an incorrect fma https://sourceforge.net/p/mingw-w64/bugs/848/
 #[cfg_attr(all(target_os = "windows", target_env = "gnu", not(target_abi = "llvm")), ignore)]
 #[test]
@@ -187,17 +61,6 @@ fn test_powi() {
 }
 
 #[test]
-fn test_sqrt_domain() {
-    assert!(f32::NAN.sqrt().is_nan());
-    assert!(f32::NEG_INFINITY.sqrt().is_nan());
-    assert!((-1.0f32).sqrt().is_nan());
-    assert_biteq!((-0.0f32).sqrt(), -0.0);
-    assert_biteq!(0.0f32.sqrt(), 0.0);
-    assert_biteq!(1.0f32.sqrt(), 1.0);
-    assert_biteq!(f32::INFINITY.sqrt(), f32::INFINITY);
-}
-
-#[test]
 fn test_to_degrees() {
     let pi: f32 = consts::PI;
     let nan: f32 = f32::NAN;
@@ -250,167 +113,6 @@ fn test_float_bits_conv() {
 }
 
 #[test]
-#[should_panic]
-fn test_clamp_min_greater_than_max() {
-    let _ = 1.0f32.clamp(3.0, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_min_is_nan() {
-    let _ = 1.0f32.clamp(f32::NAN, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_max_is_nan() {
-    let _ = 1.0f32.clamp(3.0, f32::NAN);
-}
-
-#[test]
-fn test_total_cmp() {
-    use core::cmp::Ordering;
-
-    fn quiet_bit_mask() -> u32 {
-        1 << (f32::MANTISSA_DIGITS - 2)
-    }
-
-    fn min_subnorm() -> f32 {
-        f32::MIN_POSITIVE / f32::powf(2.0, f32::MANTISSA_DIGITS as f32 - 1.0)
-    }
-
-    fn max_subnorm() -> f32 {
-        f32::MIN_POSITIVE - min_subnorm()
-    }
-
-    fn q_nan() -> f32 {
-        f32::from_bits(f32::NAN.to_bits() | quiet_bit_mask())
-    }
-
-    fn s_nan() -> f32 {
-        f32::from_bits((f32::NAN.to_bits() & !quiet_bit_mask()) + 42)
-    }
-
-    assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
-    assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Equal, (-f32::INFINITY).total_cmp(&-f32::INFINITY));
-    assert_eq!(Ordering::Equal, (-f32::MAX).total_cmp(&-f32::MAX));
-    assert_eq!(Ordering::Equal, (-2.5_f32).total_cmp(&-2.5));
-    assert_eq!(Ordering::Equal, (-1.0_f32).total_cmp(&-1.0));
-    assert_eq!(Ordering::Equal, (-1.5_f32).total_cmp(&-1.5));
-    assert_eq!(Ordering::Equal, (-0.5_f32).total_cmp(&-0.5));
-    assert_eq!(Ordering::Equal, (-f32::MIN_POSITIVE).total_cmp(&-f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Equal, (-0.0_f32).total_cmp(&-0.0));
-    assert_eq!(Ordering::Equal, 0.0_f32.total_cmp(&0.0));
-    assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Equal, f32::MIN_POSITIVE.total_cmp(&f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, 0.5_f32.total_cmp(&0.5));
-    assert_eq!(Ordering::Equal, 1.0_f32.total_cmp(&1.0));
-    assert_eq!(Ordering::Equal, 1.5_f32.total_cmp(&1.5));
-    assert_eq!(Ordering::Equal, 2.5_f32.total_cmp(&2.5));
-    assert_eq!(Ordering::Equal, f32::MAX.total_cmp(&f32::MAX));
-    assert_eq!(Ordering::Equal, f32::INFINITY.total_cmp(&f32::INFINITY));
-    assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
-    assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
-
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::INFINITY));
-    assert_eq!(Ordering::Less, (-f32::INFINITY).total_cmp(&-f32::MAX));
-    assert_eq!(Ordering::Less, (-f32::MAX).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-2.5_f32).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-1.5_f32).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-1.0_f32).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-0.5_f32).total_cmp(&-f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-f32::MIN_POSITIVE).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-0.0_f32).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, 0.0_f32.total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, f32::MIN_POSITIVE.total_cmp(&0.5));
-    assert_eq!(Ordering::Less, 0.5_f32.total_cmp(&1.0));
-    assert_eq!(Ordering::Less, 1.0_f32.total_cmp(&1.5));
-    assert_eq!(Ordering::Less, 1.5_f32.total_cmp(&2.5));
-    assert_eq!(Ordering::Less, 2.5_f32.total_cmp(&f32::MAX));
-    assert_eq!(Ordering::Less, f32::MAX.total_cmp(&f32::INFINITY));
-    assert_eq!(Ordering::Less, f32::INFINITY.total_cmp(&s_nan()));
-    assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
-
-    assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
-    assert_eq!(Ordering::Greater, (-f32::INFINITY).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Greater, (-f32::MAX).total_cmp(&-f32::INFINITY));
-    assert_eq!(Ordering::Greater, (-2.5_f32).total_cmp(&-f32::MAX));
-    assert_eq!(Ordering::Greater, (-1.5_f32).total_cmp(&-2.5));
-    assert_eq!(Ordering::Greater, (-1.0_f32).total_cmp(&-1.5));
-    assert_eq!(Ordering::Greater, (-0.5_f32).total_cmp(&-1.0));
-    assert_eq!(Ordering::Greater, (-f32::MIN_POSITIVE).total_cmp(&-0.5));
-    assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Greater, (-0.0_f32).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Greater, 0.0_f32.total_cmp(&-0.0));
-    assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
-    assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Greater, f32::MIN_POSITIVE.total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Greater, 0.5_f32.total_cmp(&f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, 1.0_f32.total_cmp(&0.5));
-    assert_eq!(Ordering::Greater, 1.5_f32.total_cmp(&1.0));
-    assert_eq!(Ordering::Greater, 2.5_f32.total_cmp(&1.5));
-    assert_eq!(Ordering::Greater, f32::MAX.total_cmp(&2.5));
-    assert_eq!(Ordering::Greater, f32::INFINITY.total_cmp(&f32::MAX));
-    assert_eq!(Ordering::Greater, s_nan().total_cmp(&f32::INFINITY));
-    assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
-
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f32::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f32::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f32::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f32::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
-
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::INFINITY));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::MAX));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::MAX));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::INFINITY));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
-}
-
-#[test]
 fn test_algebraic() {
     let a: f32 = 123.0;
     let b: f32 = 456.0;
diff --git a/library/coretests/tests/floats/f64.rs b/library/coretests/tests/floats/f64.rs
index 735b7a76515..fea9cc19b39 100644
--- a/library/coretests/tests/floats/f64.rs
+++ b/library/coretests/tests/floats/f64.rs
@@ -3,136 +3,12 @@ use core::f64::consts;
 
 use super::{assert_approx_eq, assert_biteq};
 
-/// Smallest number
-const TINY_BITS: u64 = 0x1;
-
-/// Next smallest number
-const TINY_UP_BITS: u64 = 0x2;
-
-/// Exponent = 0b11...10, Sifnificand 0b1111..10. Min val > 0
-const MAX_DOWN_BITS: u64 = 0x7fef_ffff_ffff_fffe;
-
-/// Zeroed exponent, full significant
-const LARGEST_SUBNORMAL_BITS: u64 = 0x000f_ffff_ffff_ffff;
-
-/// Exponent = 0b1, zeroed significand
-const SMALLEST_NORMAL_BITS: u64 = 0x0010_0000_0000_0000;
-
 /// First pattern over the mantissa
 const NAN_MASK1: u64 = 0x000a_aaaa_aaaa_aaaa;
 
 /// Second pattern over the mantissa
 const NAN_MASK2: u64 = 0x0005_5555_5555_5555;
 
-#[test]
-fn test_abs() {
-    assert_biteq!(f64::INFINITY.abs(), f64::INFINITY);
-    assert_biteq!(1f64.abs(), 1f64);
-    assert_biteq!(0f64.abs(), 0f64);
-    assert_biteq!((-0f64).abs(), 0f64);
-    assert_biteq!((-1f64).abs(), 1f64);
-    assert_biteq!(f64::NEG_INFINITY.abs(), f64::INFINITY);
-    assert_biteq!((1f64 / f64::NEG_INFINITY).abs(), 0f64);
-    assert!(f64::NAN.abs().is_nan());
-}
-
-#[test]
-fn test_signum() {
-    assert_biteq!(f64::INFINITY.signum(), 1f64);
-    assert_biteq!(1f64.signum(), 1f64);
-    assert_biteq!(0f64.signum(), 1f64);
-    assert_biteq!((-0f64).signum(), -1f64);
-    assert_biteq!((-1f64).signum(), -1f64);
-    assert_biteq!(f64::NEG_INFINITY.signum(), -1f64);
-    assert_biteq!((1f64 / f64::NEG_INFINITY).signum(), -1f64);
-    assert!(f64::NAN.signum().is_nan());
-}
-
-#[test]
-fn test_is_sign_positive() {
-    assert!(f64::INFINITY.is_sign_positive());
-    assert!(1f64.is_sign_positive());
-    assert!(0f64.is_sign_positive());
-    assert!(!(-0f64).is_sign_positive());
-    assert!(!(-1f64).is_sign_positive());
-    assert!(!f64::NEG_INFINITY.is_sign_positive());
-    assert!(!(1f64 / f64::NEG_INFINITY).is_sign_positive());
-    assert!(f64::NAN.is_sign_positive());
-    assert!(!(-f64::NAN).is_sign_positive());
-}
-
-#[test]
-fn test_is_sign_negative() {
-    assert!(!f64::INFINITY.is_sign_negative());
-    assert!(!1f64.is_sign_negative());
-    assert!(!0f64.is_sign_negative());
-    assert!((-0f64).is_sign_negative());
-    assert!((-1f64).is_sign_negative());
-    assert!(f64::NEG_INFINITY.is_sign_negative());
-    assert!((1f64 / f64::NEG_INFINITY).is_sign_negative());
-    assert!(!f64::NAN.is_sign_negative());
-    assert!((-f64::NAN).is_sign_negative());
-}
-
-#[test]
-fn test_next_up() {
-    let tiny = f64::from_bits(TINY_BITS);
-    let tiny_up = f64::from_bits(TINY_UP_BITS);
-    let max_down = f64::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f64::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f64::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f64::NEG_INFINITY.next_up(), f64::MIN);
-    assert_biteq!(f64::MIN.next_up(), -max_down);
-    assert_biteq!((-1.0 - f64::EPSILON).next_up(), -1.0f64);
-    assert_biteq!((-smallest_normal).next_up(), -largest_subnormal);
-    assert_biteq!((-tiny_up).next_up(), -tiny);
-    assert_biteq!((-tiny).next_up(), -0.0f64);
-    assert_biteq!((-0.0f64).next_up(), tiny);
-    assert_biteq!(0.0f64.next_up(), tiny);
-    assert_biteq!(tiny.next_up(), tiny_up);
-    assert_biteq!(largest_subnormal.next_up(), smallest_normal);
-    assert_biteq!(1.0f64.next_up(), 1.0 + f64::EPSILON);
-    assert_biteq!(f64::MAX.next_up(), f64::INFINITY);
-    assert_biteq!(f64::INFINITY.next_up(), f64::INFINITY);
-
-    let nan0 = f64::NAN;
-    let nan1 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK1);
-    let nan2 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK2);
-    assert_biteq!(nan0.next_up(), nan0);
-    assert_biteq!(nan1.next_up(), nan1);
-    assert_biteq!(nan2.next_up(), nan2);
-}
-
-#[test]
-fn test_next_down() {
-    let tiny = f64::from_bits(TINY_BITS);
-    let tiny_up = f64::from_bits(TINY_UP_BITS);
-    let max_down = f64::from_bits(MAX_DOWN_BITS);
-    let largest_subnormal = f64::from_bits(LARGEST_SUBNORMAL_BITS);
-    let smallest_normal = f64::from_bits(SMALLEST_NORMAL_BITS);
-    assert_biteq!(f64::NEG_INFINITY.next_down(), f64::NEG_INFINITY);
-    assert_biteq!(f64::MIN.next_down(), f64::NEG_INFINITY);
-    assert_biteq!((-max_down).next_down(), f64::MIN);
-    assert_biteq!((-1.0f64).next_down(), -1.0 - f64::EPSILON);
-    assert_biteq!((-largest_subnormal).next_down(), -smallest_normal);
-    assert_biteq!((-tiny).next_down(), -tiny_up);
-    assert_biteq!((-0.0f64).next_down(), -tiny);
-    assert_biteq!((0.0f64).next_down(), -tiny);
-    assert_biteq!(tiny.next_down(), 0.0f64);
-    assert_biteq!(tiny_up.next_down(), tiny);
-    assert_biteq!(smallest_normal.next_down(), largest_subnormal);
-    assert_biteq!((1.0 + f64::EPSILON).next_down(), 1.0f64);
-    assert_biteq!(f64::MAX.next_down(), max_down);
-    assert_biteq!(f64::INFINITY.next_down(), f64::MAX);
-
-    let nan0 = f64::NAN;
-    let nan1 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK1);
-    let nan2 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK2);
-    assert_biteq!(nan0.next_down(), nan0);
-    assert_biteq!(nan1.next_down(), nan1);
-    assert_biteq!(nan2.next_down(), nan2);
-}
-
 // FIXME(#140515): mingw has an incorrect fma https://sourceforge.net/p/mingw-w64/bugs/848/
 #[cfg_attr(all(target_os = "windows", target_env = "gnu", not(target_abi = "llvm")), ignore)]
 #[test]
@@ -180,17 +56,6 @@ fn test_powi() {
 }
 
 #[test]
-fn test_sqrt_domain() {
-    assert!(f64::NAN.sqrt().is_nan());
-    assert!(f64::NEG_INFINITY.sqrt().is_nan());
-    assert!((-1.0f64).sqrt().is_nan());
-    assert_biteq!((-0.0f64).sqrt(), -0.0);
-    assert_biteq!(0.0f64.sqrt(), 0.0);
-    assert_biteq!(1.0f64.sqrt(), 1.0);
-    assert_biteq!(f64::INFINITY.sqrt(), f64::INFINITY);
-}
-
-#[test]
 fn test_to_degrees() {
     let pi: f64 = consts::PI;
     let nan: f64 = f64::NAN;
@@ -241,167 +106,6 @@ fn test_float_bits_conv() {
 }
 
 #[test]
-#[should_panic]
-fn test_clamp_min_greater_than_max() {
-    let _ = 1.0f64.clamp(3.0, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_min_is_nan() {
-    let _ = 1.0f64.clamp(f64::NAN, 1.0);
-}
-
-#[test]
-#[should_panic]
-fn test_clamp_max_is_nan() {
-    let _ = 1.0f64.clamp(3.0, f64::NAN);
-}
-
-#[test]
-fn test_total_cmp() {
-    use core::cmp::Ordering;
-
-    fn quiet_bit_mask() -> u64 {
-        1 << (f64::MANTISSA_DIGITS - 2)
-    }
-
-    fn min_subnorm() -> f64 {
-        f64::MIN_POSITIVE / f64::powf(2.0, f64::MANTISSA_DIGITS as f64 - 1.0)
-    }
-
-    fn max_subnorm() -> f64 {
-        f64::MIN_POSITIVE - min_subnorm()
-    }
-
-    fn q_nan() -> f64 {
-        f64::from_bits(f64::NAN.to_bits() | quiet_bit_mask())
-    }
-
-    fn s_nan() -> f64 {
-        f64::from_bits((f64::NAN.to_bits() & !quiet_bit_mask()) + 42)
-    }
-
-    assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
-    assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Equal, (-f64::INFINITY).total_cmp(&-f64::INFINITY));
-    assert_eq!(Ordering::Equal, (-f64::MAX).total_cmp(&-f64::MAX));
-    assert_eq!(Ordering::Equal, (-2.5_f64).total_cmp(&-2.5));
-    assert_eq!(Ordering::Equal, (-1.0_f64).total_cmp(&-1.0));
-    assert_eq!(Ordering::Equal, (-1.5_f64).total_cmp(&-1.5));
-    assert_eq!(Ordering::Equal, (-0.5_f64).total_cmp(&-0.5));
-    assert_eq!(Ordering::Equal, (-f64::MIN_POSITIVE).total_cmp(&-f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Equal, (-0.0_f64).total_cmp(&-0.0));
-    assert_eq!(Ordering::Equal, 0.0_f64.total_cmp(&0.0));
-    assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Equal, f64::MIN_POSITIVE.total_cmp(&f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Equal, 0.5_f64.total_cmp(&0.5));
-    assert_eq!(Ordering::Equal, 1.0_f64.total_cmp(&1.0));
-    assert_eq!(Ordering::Equal, 1.5_f64.total_cmp(&1.5));
-    assert_eq!(Ordering::Equal, 2.5_f64.total_cmp(&2.5));
-    assert_eq!(Ordering::Equal, f64::MAX.total_cmp(&f64::MAX));
-    assert_eq!(Ordering::Equal, f64::INFINITY.total_cmp(&f64::INFINITY));
-    assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
-    assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
-
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::INFINITY));
-    assert_eq!(Ordering::Less, (-f64::INFINITY).total_cmp(&-f64::MAX));
-    assert_eq!(Ordering::Less, (-f64::MAX).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-2.5_f64).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-1.5_f64).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-1.0_f64).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-0.5_f64).total_cmp(&-f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-f64::MIN_POSITIVE).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-0.0_f64).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, 0.0_f64.total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, f64::MIN_POSITIVE.total_cmp(&0.5));
-    assert_eq!(Ordering::Less, 0.5_f64.total_cmp(&1.0));
-    assert_eq!(Ordering::Less, 1.0_f64.total_cmp(&1.5));
-    assert_eq!(Ordering::Less, 1.5_f64.total_cmp(&2.5));
-    assert_eq!(Ordering::Less, 2.5_f64.total_cmp(&f64::MAX));
-    assert_eq!(Ordering::Less, f64::MAX.total_cmp(&f64::INFINITY));
-    assert_eq!(Ordering::Less, f64::INFINITY.total_cmp(&s_nan()));
-    assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
-
-    assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
-    assert_eq!(Ordering::Greater, (-f64::INFINITY).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Greater, (-f64::MAX).total_cmp(&-f64::INFINITY));
-    assert_eq!(Ordering::Greater, (-2.5_f64).total_cmp(&-f64::MAX));
-    assert_eq!(Ordering::Greater, (-1.5_f64).total_cmp(&-2.5));
-    assert_eq!(Ordering::Greater, (-1.0_f64).total_cmp(&-1.5));
-    assert_eq!(Ordering::Greater, (-0.5_f64).total_cmp(&-1.0));
-    assert_eq!(Ordering::Greater, (-f64::MIN_POSITIVE).total_cmp(&-0.5));
-    assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Greater, (-0.0_f64).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Greater, 0.0_f64.total_cmp(&-0.0));
-    assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
-    assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Greater, f64::MIN_POSITIVE.total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Greater, 0.5_f64.total_cmp(&f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Greater, 1.0_f64.total_cmp(&0.5));
-    assert_eq!(Ordering::Greater, 1.5_f64.total_cmp(&1.0));
-    assert_eq!(Ordering::Greater, 2.5_f64.total_cmp(&1.5));
-    assert_eq!(Ordering::Greater, f64::MAX.total_cmp(&2.5));
-    assert_eq!(Ordering::Greater, f64::INFINITY.total_cmp(&f64::MAX));
-    assert_eq!(Ordering::Greater, s_nan().total_cmp(&f64::INFINITY));
-    assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
-
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f64::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f64::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f64::MAX));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f64::INFINITY));
-    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
-
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::INFINITY));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::MAX));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::MIN_POSITIVE));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::MAX));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::INFINITY));
-    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
-}
-
-#[test]
 fn test_algebraic() {
     let a: f64 = 123.0;
     let b: f64 = 456.0;
diff --git a/library/coretests/tests/floats/mod.rs b/library/coretests/tests/floats/mod.rs
index 43431bba695..2c2a07920d0 100644
--- a/library/coretests/tests/floats/mod.rs
+++ b/library/coretests/tests/floats/mod.rs
@@ -2,34 +2,80 @@ use std::num::FpCategory as Fp;
 use std::ops::{Add, Div, Mul, Rem, Sub};
 
 trait TestableFloat {
+    /// Unsigned int with the same size, for converting to/from bits.
+    type Int;
     /// Set the default tolerance for float comparison based on the type.
     const APPROX: Self;
+    const ZERO: Self;
+    const ONE: Self;
     const MIN_POSITIVE_NORMAL: Self;
     const MAX_SUBNORMAL: Self;
+    /// Smallest number
+    const TINY: Self;
+    /// Next smallest number
+    const TINY_UP: Self;
+    /// Exponent = 0b11...10, Significand 0b1111..10: the largest finite value below MAX
+    const MAX_DOWN: Self;
+    /// First pattern over the mantissa
+    const NAN_MASK1: Self::Int;
+    /// Second pattern over the mantissa
+    const NAN_MASK2: Self::Int;
 }
 
 impl TestableFloat for f16 {
+    type Int = u16;
     const APPROX: Self = 1e-3;
+    const ZERO: Self = 0.0;
+    const ONE: Self = 1.0;
     const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE;
     const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down();
+    const TINY: Self = Self::from_bits(0x1);
+    const TINY_UP: Self = Self::from_bits(0x2);
+    const MAX_DOWN: Self = Self::from_bits(0x7bfe);
+    const NAN_MASK1: Self::Int = 0x02aa;
+    const NAN_MASK2: Self::Int = 0x0155;
 }
 
 impl TestableFloat for f32 {
+    type Int = u32;
     const APPROX: Self = 1e-6;
+    const ZERO: Self = 0.0;
+    const ONE: Self = 1.0;
     const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE;
     const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down();
+    const TINY: Self = Self::from_bits(0x1);
+    const TINY_UP: Self = Self::from_bits(0x2);
+    const MAX_DOWN: Self = Self::from_bits(0x7f7f_fffe);
+    const NAN_MASK1: Self::Int = 0x002a_aaaa;
+    const NAN_MASK2: Self::Int = 0x0055_5555;
 }
 
 impl TestableFloat for f64 {
+    type Int = u64;
     const APPROX: Self = 1e-6;
+    const ZERO: Self = 0.0;
+    const ONE: Self = 1.0;
     const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE;
     const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down();
+    const TINY: Self = Self::from_bits(0x1);
+    const TINY_UP: Self = Self::from_bits(0x2);
+    const MAX_DOWN: Self = Self::from_bits(0x7fef_ffff_ffff_fffe);
+    const NAN_MASK1: Self::Int = 0x000a_aaaa_aaaa_aaaa;
+    const NAN_MASK2: Self::Int = 0x0005_5555_5555_5555;
 }
 
 impl TestableFloat for f128 {
+    type Int = u128;
     const APPROX: Self = 1e-9;
+    const ZERO: Self = 0.0;
+    const ONE: Self = 1.0;
     const MIN_POSITIVE_NORMAL: Self = Self::MIN_POSITIVE;
     const MAX_SUBNORMAL: Self = Self::MIN_POSITIVE.next_down();
+    const TINY: Self = Self::from_bits(0x1);
+    const TINY_UP: Self = Self::from_bits(0x2);
+    const MAX_DOWN: Self = Self::from_bits(0x7ffefffffffffffffffffffffffffffe);
+    const NAN_MASK1: Self::Int = 0x0000aaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+    const NAN_MASK2: Self::Int = 0x00005555555555555555555555555555;
 }
 
 /// Determine the tolerance for values of the argument type.
@@ -342,15 +388,14 @@ float_test! {
         f128: #[cfg(any(miri, target_has_reliable_f128))],
     },
     test<Float> {
-        let zero: Float = 0.0;
-        assert_biteq!(0.0, zero);
-        assert!(!zero.is_infinite());
-        assert!(zero.is_finite());
-        assert!(zero.is_sign_positive());
-        assert!(!zero.is_sign_negative());
-        assert!(!zero.is_nan());
-        assert!(!zero.is_normal());
-        assert!(matches!(zero.classify(), Fp::Zero));
+        assert_biteq!(0.0, Float::ZERO);
+        assert!(!Float::ZERO.is_infinite());
+        assert!(Float::ZERO.is_finite());
+        assert!(Float::ZERO.is_sign_positive());
+        assert!(!Float::ZERO.is_sign_negative());
+        assert!(!Float::ZERO.is_nan());
+        assert!(!Float::ZERO.is_normal());
+        assert!(matches!(Float::ZERO.classify(), Fp::Zero));
     }
 }
 
@@ -381,15 +426,14 @@ float_test! {
         f128: #[cfg(any(miri, target_has_reliable_f128))],
     },
     test<Float> {
-        let one: Float = 1.0;
-        assert_biteq!(1.0, one);
-        assert!(!one.is_infinite());
-        assert!(one.is_finite());
-        assert!(one.is_sign_positive());
-        assert!(!one.is_sign_negative());
-        assert!(!one.is_nan());
-        assert!(one.is_normal());
-        assert!(matches!(one.classify(), Fp::Normal));
+        assert_biteq!(1.0, Float::ONE);
+        assert!(!Float::ONE.is_infinite());
+        assert!(Float::ONE.is_finite());
+        assert!(Float::ONE.is_sign_positive());
+        assert!(!Float::ONE.is_sign_negative());
+        assert!(!Float::ONE.is_nan());
+        assert!(Float::ONE.is_normal());
+        assert!(matches!(Float::ONE.classify(), Fp::Normal));
     }
 }
 
@@ -403,11 +447,10 @@ float_test! {
         let nan: Float = Float::NAN;
         let inf: Float = Float::INFINITY;
         let neg_inf: Float = Float::NEG_INFINITY;
-        let zero: Float = 0.0;
         let pos: Float = 5.3;
         let neg: Float = -10.732;
         assert!(nan.is_nan());
-        assert!(!zero.is_nan());
+        assert!(!Float::ZERO.is_nan());
         assert!(!pos.is_nan());
         assert!(!neg.is_nan());
         assert!(!inf.is_nan());
@@ -425,13 +468,12 @@ float_test! {
         let nan: Float = Float::NAN;
         let inf: Float = Float::INFINITY;
         let neg_inf: Float = Float::NEG_INFINITY;
-        let zero: Float = 0.0;
         let pos: Float = 42.8;
         let neg: Float = -109.2;
         assert!(!nan.is_infinite());
         assert!(inf.is_infinite());
         assert!(neg_inf.is_infinite());
-        assert!(!zero.is_infinite());
+        assert!(!Float::ZERO.is_infinite());
         assert!(!pos.is_infinite());
         assert!(!neg.is_infinite());
     }
@@ -447,13 +489,12 @@ float_test! {
         let nan: Float = Float::NAN;
         let inf: Float = Float::INFINITY;
         let neg_inf: Float = Float::NEG_INFINITY;
-        let zero: Float = 0.0;
         let pos: Float = 42.8;
         let neg: Float = -109.2;
         assert!(!nan.is_finite());
         assert!(!inf.is_finite());
         assert!(!neg_inf.is_finite());
-        assert!(zero.is_finite());
+        assert!(Float::ZERO.is_finite());
         assert!(pos.is_finite());
         assert!(neg.is_finite());
     }
@@ -469,15 +510,13 @@ float_test! {
         let nan: Float = Float::NAN;
         let inf: Float = Float::INFINITY;
         let neg_inf: Float = Float::NEG_INFINITY;
-        let zero: Float = 0.0;
         let neg_zero: Float = -0.0;
-        let one : Float = 1.0;
         assert!(!nan.is_normal());
         assert!(!inf.is_normal());
         assert!(!neg_inf.is_normal());
-        assert!(!zero.is_normal());
+        assert!(!Float::ZERO.is_normal());
         assert!(!neg_zero.is_normal());
-        assert!(one.is_normal());
+        assert!(Float::ONE.is_normal());
         assert!(Float::MIN_POSITIVE_NORMAL.is_normal());
         assert!(!Float::MAX_SUBNORMAL.is_normal());
     }
@@ -492,15 +531,13 @@ float_test! {
         let nan: Float = Float::NAN;
         let inf: Float = Float::INFINITY;
         let neg_inf: Float = Float::NEG_INFINITY;
-        let zero: Float = 0.0;
         let neg_zero: Float = -0.0;
-        let one: Float = 1.0;
         assert!(matches!(nan.classify(), Fp::Nan));
         assert!(matches!(inf.classify(), Fp::Infinite));
         assert!(matches!(neg_inf.classify(), Fp::Infinite));
-        assert!(matches!(zero.classify(), Fp::Zero));
+        assert!(matches!(Float::ZERO.classify(), Fp::Zero));
         assert!(matches!(neg_zero.classify(), Fp::Zero));
-        assert!(matches!(one.classify(), Fp::Normal));
+        assert!(matches!(Float::ONE.classify(), Fp::Normal));
         assert!(matches!(Float::MIN_POSITIVE_NORMAL.classify(), Fp::Normal));
         assert!(matches!(Float::MAX_SUBNORMAL.classify(), Fp::Subnormal));
     }
@@ -720,10 +757,14 @@ float_test! {
         f128: #[cfg(any(miri, target_has_reliable_f128_math))],
     },
     test<Float> {
-        assert_biteq!((-1.0 as Float).abs(), 1.0);
-        assert_biteq!((1.0 as Float).abs(), 1.0);
-        assert_biteq!(Float::NEG_INFINITY.abs(), Float::INFINITY);
         assert_biteq!(Float::INFINITY.abs(), Float::INFINITY);
+        assert_biteq!(Float::ONE.abs(), Float::ONE);
+        assert_biteq!(Float::ZERO.abs(), Float::ZERO);
+        assert_biteq!((-Float::ZERO).abs(), Float::ZERO);
+        assert_biteq!((-Float::ONE).abs(), Float::ONE);
+        assert_biteq!(Float::NEG_INFINITY.abs(), Float::INFINITY);
+        assert_biteq!((Float::ONE / Float::NEG_INFINITY).abs(), Float::ZERO);
+        assert!(Float::NAN.abs().is_nan());
     }
 }
 
@@ -951,3 +992,351 @@ float_test! {
         assert!(Float::NEG_INFINITY.fract().is_nan());
     }
 }
+
+float_test! {
+    name: signum,
+    attrs: {
+        f16: #[cfg(any(miri, target_has_reliable_f16_math))],
+        f128: #[cfg(any(miri, target_has_reliable_f128_math))],
+    },
+    test<Float> {
+        assert_biteq!(Float::INFINITY.signum(), Float::ONE);
+        assert_biteq!(Float::ONE.signum(), Float::ONE);
+        assert_biteq!(Float::ZERO.signum(), Float::ONE);
+        assert_biteq!((-Float::ZERO).signum(), -Float::ONE);
+        assert_biteq!((-Float::ONE).signum(), -Float::ONE);
+        assert_biteq!(Float::NEG_INFINITY.signum(), -Float::ONE);
+        assert_biteq!((Float::ONE / Float::NEG_INFINITY).signum(), -Float::ONE);
+        assert!(Float::NAN.signum().is_nan());
+    }
+}
+
+float_test! {
+    name: is_sign_positive,
+    attrs: {
+        f16: #[cfg(any(miri, target_has_reliable_f16))],
+        f128: #[cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        assert!(Float::INFINITY.is_sign_positive());
+        assert!(Float::ONE.is_sign_positive());
+        assert!(Float::ZERO.is_sign_positive());
+        assert!(!(-Float::ZERO).is_sign_positive());
+        assert!(!(-Float::ONE).is_sign_positive());
+        assert!(!Float::NEG_INFINITY.is_sign_positive());
+        assert!(!(Float::ONE / Float::NEG_INFINITY).is_sign_positive());
+        assert!(Float::NAN.is_sign_positive());
+        assert!(!(-Float::NAN).is_sign_positive());
+    }
+}
+
+float_test! {
+    name: is_sign_negative,
+    attrs: {
+        f16: #[cfg(any(miri, target_has_reliable_f16))],
+        f128: #[cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        assert!(!Float::INFINITY.is_sign_negative());
+        assert!(!Float::ONE.is_sign_negative());
+        assert!(!Float::ZERO.is_sign_negative());
+        assert!((-Float::ZERO).is_sign_negative());
+        assert!((-Float::ONE).is_sign_negative());
+        assert!(Float::NEG_INFINITY.is_sign_negative());
+        assert!((Float::ONE / Float::NEG_INFINITY).is_sign_negative());
+        assert!(!Float::NAN.is_sign_negative());
+        assert!((-Float::NAN).is_sign_negative());
+    }
+}
+
+float_test! {
+    name: next_up,
+    attrs: {
+        f16: #[cfg(any(miri, target_has_reliable_f16))],
+        f128: #[cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        assert_biteq!(Float::NEG_INFINITY.next_up(), Float::MIN);
+        assert_biteq!(Float::MIN.next_up(), -Float::MAX_DOWN);
+        assert_biteq!((-Float::ONE - Float::EPSILON).next_up(), -Float::ONE);
+        assert_biteq!((-Float::MIN_POSITIVE_NORMAL).next_up(), -Float::MAX_SUBNORMAL);
+        assert_biteq!((-Float::TINY_UP).next_up(), -Float::TINY);
+        assert_biteq!((-Float::TINY).next_up(), -Float::ZERO);
+        assert_biteq!((-Float::ZERO).next_up(), Float::TINY);
+        assert_biteq!(Float::ZERO.next_up(), Float::TINY);
+        assert_biteq!(Float::TINY.next_up(), Float::TINY_UP);
+        assert_biteq!(Float::MAX_SUBNORMAL.next_up(), Float::MIN_POSITIVE_NORMAL);
+        assert_biteq!(Float::ONE.next_up(), 1.0 + Float::EPSILON);
+        assert_biteq!(Float::MAX.next_up(), Float::INFINITY);
+        assert_biteq!(Float::INFINITY.next_up(), Float::INFINITY);
+
+        // Check that NaNs roundtrip.
+        let nan0 = Float::NAN;
+        let nan1 = Float::from_bits(Float::NAN.to_bits() ^ Float::NAN_MASK1);
+        let nan2 = Float::from_bits(Float::NAN.to_bits() ^ Float::NAN_MASK2);
+        assert_biteq!(nan0.next_up(), nan0);
+        assert_biteq!(nan1.next_up(), nan1);
+        assert_biteq!(nan2.next_up(), nan2);
+    }
+}
+
+float_test! {
+    name: next_down,
+    attrs: {
+        f16: #[cfg(any(miri, target_has_reliable_f16))],
+        f128: #[cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        assert_biteq!(Float::NEG_INFINITY.next_down(), Float::NEG_INFINITY);
+        assert_biteq!(Float::MIN.next_down(), Float::NEG_INFINITY);
+        assert_biteq!((-Float::MAX_DOWN).next_down(), Float::MIN);
+        assert_biteq!((-Float::ONE).next_down(), -1.0 - Float::EPSILON);
+        assert_biteq!((-Float::MAX_SUBNORMAL).next_down(), -Float::MIN_POSITIVE_NORMAL);
+        assert_biteq!((-Float::TINY).next_down(), -Float::TINY_UP);
+        assert_biteq!((-Float::ZERO).next_down(), -Float::TINY);
+        assert_biteq!((Float::ZERO).next_down(), -Float::TINY);
+        assert_biteq!(Float::TINY.next_down(), Float::ZERO);
+        assert_biteq!(Float::TINY_UP.next_down(), Float::TINY);
+        assert_biteq!(Float::MIN_POSITIVE_NORMAL.next_down(), Float::MAX_SUBNORMAL);
+        assert_biteq!((1.0 + Float::EPSILON).next_down(), Float::ONE);
+        assert_biteq!(Float::MAX.next_down(), Float::MAX_DOWN);
+        assert_biteq!(Float::INFINITY.next_down(), Float::MAX);
+
+        // Check that NaNs roundtrip.
+        let nan0 = Float::NAN;
+        let nan1 = Float::from_bits(Float::NAN.to_bits() ^ Float::NAN_MASK1);
+        let nan2 = Float::from_bits(Float::NAN.to_bits() ^ Float::NAN_MASK2);
+        assert_biteq!(nan0.next_down(), nan0);
+        assert_biteq!(nan1.next_down(), nan1);
+        assert_biteq!(nan2.next_down(), nan2);
+    }
+}
+
+// FIXME(f16_f128,miri): many of these have to be disabled since miri does not yet support
+// the intrinsics.
+
+float_test! {
+    name: sqrt_domain,
+    attrs: {
+        const: #[cfg(false)],
+        f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+        f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
+    },
+    test<Float> {
+        assert!(Float::NAN.sqrt().is_nan());
+        assert!(Float::NEG_INFINITY.sqrt().is_nan());
+        assert!((-Float::ONE).sqrt().is_nan());
+        assert_biteq!((-Float::ZERO).sqrt(), -Float::ZERO);
+        assert_biteq!(Float::ZERO.sqrt(), Float::ZERO);
+        assert_biteq!(Float::ONE.sqrt(), Float::ONE);
+        assert_biteq!(Float::INFINITY.sqrt(), Float::INFINITY);
+    }
+}
+
+float_test! {
+    name: clamp_min_greater_than_max,
+    attrs: {
+        const: #[cfg(false)],
+        f16: #[should_panic, cfg(any(miri, target_has_reliable_f16))],
+        f32: #[should_panic],
+        f64: #[should_panic],
+        f128: #[should_panic, cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        let _ = Float::ONE.clamp(3.0, 1.0);
+    }
+}
+
+float_test! {
+    name: clamp_min_is_nan,
+    attrs: {
+        const: #[cfg(false)],
+        f16: #[should_panic, cfg(any(miri, target_has_reliable_f16))],
+        f32: #[should_panic],
+        f64: #[should_panic],
+        f128: #[should_panic, cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        let _ = Float::ONE.clamp(Float::NAN, 1.0);
+    }
+}
+
+float_test! {
+    name: clamp_max_is_nan,
+    attrs: {
+        const: #[cfg(false)],
+        f16: #[should_panic, cfg(any(miri, target_has_reliable_f16))],
+        f32: #[should_panic],
+        f64: #[should_panic],
+        f128: #[should_panic, cfg(any(miri, target_has_reliable_f128))],
+    },
+    test<Float> {
+        let _ = Float::ONE.clamp(3.0, Float::NAN);
+    }
+}
+
+float_test! {
+    name: total_cmp,
+    attrs: {
+        const: #[cfg(false)],
+        f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+        f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
+    },
+    test<Float> {
+        use core::cmp::Ordering;
+
+        fn quiet_bit_mask() -> <Float as TestableFloat>::Int {
+            1 << (Float::MANTISSA_DIGITS - 2)
+        }
+
+        fn q_nan() -> Float {
+            Float::from_bits(Float::NAN.to_bits() | quiet_bit_mask())
+        }
+
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-q_nan(), &-q_nan()));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-Float::INFINITY, &-Float::INFINITY));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-Float::MAX, &-Float::MAX));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-2.5, &-2.5));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-1.0, &-1.0));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-1.5, &-1.5));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-0.5, &-0.5));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-Float::MIN_POSITIVE, &-Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-Float::MAX_SUBNORMAL, &-Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-Float::TINY, &-Float::TINY));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-0.0, &-0.0));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&0.0, &0.0));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&Float::TINY, &Float::TINY));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&Float::MAX_SUBNORMAL, &Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&Float::MIN_POSITIVE, &Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&0.5, &0.5));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&1.0, &1.0));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&1.5, &1.5));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&2.5, &2.5));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&Float::MAX, &Float::MAX));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&Float::INFINITY, &Float::INFINITY));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&q_nan(), &q_nan()));
+
+        assert_eq!(Ordering::Less, Float::total_cmp(&-Float::INFINITY, &-Float::MAX));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-Float::MAX, &-2.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-2.5, &-1.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-1.5, &-1.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-1.0, &-0.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-0.5, &-Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-Float::MIN_POSITIVE, &-Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-Float::MAX_SUBNORMAL, &-Float::TINY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-Float::TINY, &-0.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-0.0, &0.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&0.0, &Float::TINY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&Float::TINY, &Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Less, Float::total_cmp(&Float::MAX_SUBNORMAL, &Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Less, Float::total_cmp(&Float::MIN_POSITIVE, &0.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&0.5, &1.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&1.0, &1.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&1.5, &2.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&2.5, &Float::MAX));
+        assert_eq!(Ordering::Less, Float::total_cmp(&Float::MAX, &Float::INFINITY));
+
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-Float::MAX, &-Float::INFINITY));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-2.5, &-Float::MAX));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-1.5, &-2.5));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-1.0, &-1.5));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-0.5, &-1.0));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-Float::MIN_POSITIVE, &-0.5));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-Float::MAX_SUBNORMAL, &-Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-Float::TINY, &-Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-0.0, &-Float::TINY));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&0.0, &-0.0));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&Float::TINY, &0.0));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&Float::MAX_SUBNORMAL, &Float::TINY));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&Float::MIN_POSITIVE, &Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&0.5, &Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&1.0, &0.5));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&1.5, &1.0));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&2.5, &1.5));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&Float::MAX, &2.5));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&Float::INFINITY, &Float::MAX));
+
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-Float::INFINITY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-Float::MAX));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-2.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-1.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-1.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-0.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-Float::TINY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-0.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &0.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &Float::TINY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &0.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &1.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &1.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &2.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &Float::MAX));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &Float::INFINITY));
+
+    }
+}
+
+// FIXME(f16): Tests involving sNaN are disabled because without optimizations, `total_cmp` is
+// getting incorrectly lowered to code that includes an `extend`/`trunc` round trip, which quiets
+// sNaNs. See: https://github.com/llvm/llvm-project/issues/104915
+
+float_test! {
+    name: total_cmp_s_nan,
+    attrs: {
+        const: #[cfg(false)],
+        f16: #[cfg(false)],
+        f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
+    },
+    test<Float> {
+        use core::cmp::Ordering;
+
+        fn quiet_bit_mask() -> <Float as TestableFloat>::Int {
+            1 << (Float::MANTISSA_DIGITS - 2)
+        }
+
+        fn q_nan() -> Float {
+            Float::from_bits(Float::NAN.to_bits() | quiet_bit_mask())
+        }
+
+        fn s_nan() -> Float {
+            Float::from_bits((Float::NAN.to_bits() & !quiet_bit_mask()) + 42)
+        }
+        assert_eq!(Ordering::Equal, Float::total_cmp(&-s_nan(), &-s_nan()));
+        assert_eq!(Ordering::Equal, Float::total_cmp(&s_nan(), &s_nan()));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-s_nan()));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-Float::INFINITY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&Float::INFINITY, &s_nan()));
+        assert_eq!(Ordering::Less, Float::total_cmp(&s_nan(), &q_nan()));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-s_nan(), &-q_nan()));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&-Float::INFINITY, &-s_nan()));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&s_nan(), &Float::INFINITY));
+        assert_eq!(Ordering::Greater, Float::total_cmp(&q_nan(), &s_nan()));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &-s_nan()));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-q_nan(), &s_nan()));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-Float::INFINITY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-Float::MAX));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-2.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-1.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-1.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-0.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-Float::TINY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &-0.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &0.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &Float::TINY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &Float::MAX_SUBNORMAL));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &Float::MIN_POSITIVE));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &0.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &1.0));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &1.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &2.5));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &Float::MAX));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &Float::INFINITY));
+        assert_eq!(Ordering::Less, Float::total_cmp(&-s_nan(), &s_nan()));
+    }
+}
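As an illustration (not part of the diff) of the total order that the generic
`total_cmp` test above exercises, sorting an `f32` slice with the stable
`total_cmp` method places negative NaNs first and positive NaNs last:

    let mut v = [f32::NAN, 1.0, -0.0, f32::NEG_INFINITY, 0.0, -f32::NAN];
    v.sort_by(|a, b| a.total_cmp(b));
    // Order per IEEE 754 totalOrder: [-NaN, -inf, -0.0, 0.0, 1.0, NaN]
    assert!(v[0].is_nan() && v[0].is_sign_negative());
    assert!(v[5].is_nan() && v[5].is_sign_positive());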
diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs
index 0a9c0c61c95..0c4d49f3c99 100644
--- a/library/coretests/tests/lib.rs
+++ b/library/coretests/tests/lib.rs
@@ -18,10 +18,13 @@
 #![feature(const_deref)]
 #![feature(const_destruct)]
 #![feature(const_eval_select)]
+#![feature(const_from)]
 #![feature(const_ops)]
+#![feature(const_option_ops)]
 #![feature(const_ref_cell)]
 #![feature(const_result_trait_fn)]
 #![feature(const_trait_impl)]
+#![feature(control_flow_ok)]
 #![feature(core_float_math)]
 #![feature(core_intrinsics)]
 #![feature(core_intrinsics_fallbacks)]
@@ -33,7 +36,6 @@
 #![feature(drop_guard)]
 #![feature(duration_constants)]
 #![feature(duration_constructors)]
-#![feature(duration_constructors_lite)]
 #![feature(error_generic_member_access)]
 #![feature(exact_div)]
 #![feature(exact_size_is_empty)]
diff --git a/library/coretests/tests/nonzero.rs b/library/coretests/tests/nonzero.rs
index 00232c9b706..eb06c34fd02 100644
--- a/library/coretests/tests/nonzero.rs
+++ b/library/coretests/tests/nonzero.rs
@@ -214,13 +214,11 @@ fn nonzero_const() {
     const ONE: Option<NonZero<u8>> = NonZero::new(1);
     assert!(ONE.is_some());
 
-    /* FIXME(#110395)
     const FROM_NONZERO_U8: u8 = u8::from(NONZERO_U8);
     assert_eq!(FROM_NONZERO_U8, 5);
 
     const NONZERO_CONVERT: NonZero<u32> = NonZero::<u32>::from(NONZERO_U8);
     assert_eq!(NONZERO_CONVERT.get(), 5);
-    */
 }
 
 #[test]
@@ -321,7 +319,7 @@ fn nonzero_trailing_zeros() {
 }
 
 #[test]
-fn test_nonzero_isolate_most_significant_one() {
+fn test_nonzero_isolate_highest_one() {
     // Signed most significant one
     macro_rules! nonzero_int_impl {
         ($($T:ty),+) => {
@@ -335,8 +333,8 @@ fn test_nonzero_isolate_most_significant_one() {
                     let mut i = 0;
                     while i < <$T>::BITS {
                         assert_eq!(
-                            NonZero::<$T>::new(BITS >> i).unwrap().isolate_most_significant_one(),
-                            NonZero::<$T>::new(MOST_SIG_ONE >> i).unwrap().isolate_most_significant_one()
+                            NonZero::<$T>::new(BITS >> i).unwrap().isolate_highest_one(),
+                            NonZero::<$T>::new(MOST_SIG_ONE >> i).unwrap().isolate_highest_one()
                         );
                         i += 1;
                     }
@@ -356,8 +354,8 @@ fn test_nonzero_isolate_most_significant_one() {
                     let mut i = 0;
                     while i < <$T>::BITS {
                         assert_eq!(
-                            NonZero::<$T>::new(BITS >> i).unwrap().isolate_most_significant_one(),
-                            NonZero::<$T>::new(MOST_SIG_ONE >> i).unwrap().isolate_most_significant_one(),
+                            NonZero::<$T>::new(BITS >> i).unwrap().isolate_highest_one(),
+                            NonZero::<$T>::new(MOST_SIG_ONE >> i).unwrap().isolate_highest_one(),
                         );
                         i += 1;
                     }
@@ -371,7 +369,7 @@ fn test_nonzero_isolate_most_significant_one() {
 }
 
 #[test]
-fn test_nonzero_isolate_least_significant_one() {
+fn test_nonzero_isolate_lowest_one() {
     // Signed least significant one
     macro_rules! nonzero_int_impl {
         ($($T:ty),+) => {
@@ -385,8 +383,8 @@ fn test_nonzero_isolate_least_significant_one() {
                     let mut i = 0;
                     while i < <$T>::BITS {
                         assert_eq!(
-                            NonZero::<$T>::new(BITS << i).unwrap().isolate_least_significant_one(),
-                            NonZero::<$T>::new(LEAST_SIG_ONE << i).unwrap().isolate_least_significant_one()
+                            NonZero::<$T>::new(BITS << i).unwrap().isolate_lowest_one(),
+                            NonZero::<$T>::new(LEAST_SIG_ONE << i).unwrap().isolate_lowest_one()
                         );
                         i += 1;
                     }
@@ -406,8 +404,8 @@ fn test_nonzero_isolate_least_significant_one() {
                     let mut i = 0;
                     while i < <$T>::BITS {
                         assert_eq!(
-                            NonZero::<$T>::new(BITS << i).unwrap().isolate_least_significant_one(),
-                            NonZero::<$T>::new(LEAST_SIG_ONE << i).unwrap().isolate_least_significant_one(),
+                            NonZero::<$T>::new(BITS << i).unwrap().isolate_lowest_one(),
+                            NonZero::<$T>::new(LEAST_SIG_ONE << i).unwrap().isolate_lowest_one(),
                         );
                         i += 1;
                     }
diff --git a/library/coretests/tests/num/const_from.rs b/library/coretests/tests/num/const_from.rs
index fa58e771879..aca18ef39de 100644
--- a/library/coretests/tests/num/const_from.rs
+++ b/library/coretests/tests/num/const_from.rs
@@ -1,4 +1,3 @@
-/* FIXME(#110395)
 #[test]
 fn from() {
     use core::convert::TryFrom;
@@ -24,4 +23,3 @@ fn from() {
     const I16_FROM_U16: Result<i16, TryFromIntError> = i16::try_from(1u16);
     assert_eq!(I16_FROM_U16, Ok(1i16));
 }
-*/
diff --git a/library/coretests/tests/num/int_macros.rs b/library/coretests/tests/num/int_macros.rs
index 41d399c1ad9..ca32fce861f 100644
--- a/library/coretests/tests/num/int_macros.rs
+++ b/library/coretests/tests/num/int_macros.rs
@@ -194,7 +194,7 @@ macro_rules! int_module {
         }
 
         #[test]
-        fn test_isolate_most_significant_one() {
+        fn test_isolate_highest_one() {
             const BITS: $T = -1;
             const MOST_SIG_ONE: $T = 1 << (<$T>::BITS - 1);
 
@@ -203,15 +203,15 @@ macro_rules! int_module {
             let mut i = 0;
             while i < <$T>::BITS {
                 assert_eq!(
-                    (BITS >> i).isolate_most_significant_one(),
-                    (MOST_SIG_ONE >> i).isolate_most_significant_one()
+                    (BITS >> i).isolate_highest_one(),
+                    (MOST_SIG_ONE >> i).isolate_highest_one()
                 );
                 i += 1;
             }
         }
 
         #[test]
-        fn test_isolate_least_significant_one() {
+        fn test_isolate_lowest_one() {
             const BITS: $T = -1;
             const LEAST_SIG_ONE: $T = 1;
 
@@ -220,8 +220,8 @@ macro_rules! int_module {
             let mut i = 0;
             while i < <$T>::BITS {
                 assert_eq!(
-                    (BITS << i).isolate_least_significant_one(),
-                    (LEAST_SIG_ONE << i).isolate_least_significant_one()
+                    (BITS << i).isolate_lowest_one(),
+                    (LEAST_SIG_ONE << i).isolate_lowest_one()
                 );
                 i += 1;
             }
diff --git a/library/coretests/tests/num/uint_macros.rs b/library/coretests/tests/num/uint_macros.rs
index 7e02027bdd6..8f389de70aa 100644
--- a/library/coretests/tests/num/uint_macros.rs
+++ b/library/coretests/tests/num/uint_macros.rs
@@ -151,7 +151,7 @@ macro_rules! uint_module {
         }
 
         #[test]
-        fn test_isolate_most_significant_one() {
+        fn test_isolate_highest_one() {
             const BITS: $T = <$T>::MAX;
             const MOST_SIG_ONE: $T = 1 << (<$T>::BITS - 1);
 
@@ -160,15 +160,15 @@ macro_rules! uint_module {
             let mut i = 0;
             while i < <$T>::BITS {
                 assert_eq!(
-                    (BITS >> i).isolate_most_significant_one(),
-                    (MOST_SIG_ONE >> i).isolate_most_significant_one(),
+                    (BITS >> i).isolate_highest_one(),
+                    (MOST_SIG_ONE >> i).isolate_highest_one(),
                 );
                 i += 1;
             }
         }
 
         #[test]
-        fn test_isolate_least_significant_one() {
+        fn test_isolate_lowest_one() {
             const BITS: $T = <$T>::MAX;
             const LEAST_SIG_ONE: $T = 1;
 
@@ -177,8 +177,8 @@ macro_rules! uint_module {
             let mut i = 0;
             while i < <$T>::BITS {
                 assert_eq!(
-                    (BITS << i).isolate_least_significant_one(),
-                    (LEAST_SIG_ONE << i).isolate_least_significant_one(),
+                    (BITS << i).isolate_lowest_one(),
+                    (LEAST_SIG_ONE << i).isolate_lowest_one(),
                 );
                 i += 1;
             }
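The hunks above only rename the tests to match the renamed methods; as a rough
sketch of what the nightly-only methods compute (illustrative, not part of the
diff), each call keeps exactly one set bit of the receiver:

    assert_eq!(0b0110_0100_u8.isolate_highest_one(), 0b0100_0000);
    assert_eq!(0b0110_0100_u8.isolate_lowest_one(), 0b0000_0100);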
diff --git a/library/coretests/tests/ops/control_flow.rs b/library/coretests/tests/ops/control_flow.rs
index eacfd63a6c4..1df6599ac4a 100644
--- a/library/coretests/tests/ops/control_flow.rs
+++ b/library/coretests/tests/ops/control_flow.rs
@@ -16,3 +16,15 @@ fn control_flow_discriminants_match_result() {
         discriminant_value(&Result::<i32, i32>::Ok(3)),
     );
 }
+
+#[test]
+fn control_flow_break_ok() {
+    assert_eq!(ControlFlow::<char, i32>::Break('b').break_ok(), Ok('b'));
+    assert_eq!(ControlFlow::<char, i32>::Continue(3).break_ok(), Err(3));
+}
+
+#[test]
+fn control_flow_continue_ok() {
+    assert_eq!(ControlFlow::<char, i32>::Break('b').continue_ok(), Err('b'));
+    assert_eq!(ControlFlow::<char, i32>::Continue(3).continue_ok(), Ok(3));
+}
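A minimal usage sketch of the two conversions covered by the new tests
(illustrative, not part of the diff; requires nightly with the
`control_flow_ok` feature enabled above in lib.rs):

    use std::ops::ControlFlow;

    // `break_ok` treats Break as the success case, `continue_ok` treats Continue as success.
    let found: ControlFlow<char, i32> = ControlFlow::Break('b');
    let finished: ControlFlow<char, i32> = ControlFlow::Continue(3);
    assert_eq!(found.break_ok(), Ok('b'));
    assert_eq!(finished.continue_ok(), Ok(3));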
diff --git a/library/coretests/tests/option.rs b/library/coretests/tests/option.rs
index 336a79a02ce..fc0f82ad6bb 100644
--- a/library/coretests/tests/option.rs
+++ b/library/coretests/tests/option.rs
@@ -87,7 +87,6 @@ fn test_and() {
     assert_eq!(x.and(Some(2)), None);
     assert_eq!(x.and(None::<isize>), None);
 
-    /* FIXME(#110395)
     const FOO: Option<isize> = Some(1);
     const A: Option<isize> = FOO.and(Some(2));
     const B: Option<isize> = FOO.and(None);
@@ -99,7 +98,6 @@ fn test_and() {
     const D: Option<isize> = BAR.and(None);
     assert_eq!(C, None);
     assert_eq!(D, None);
-    */
 }
 
 #[test]
diff --git a/library/coretests/tests/tuple.rs b/library/coretests/tests/tuple.rs
index ea1e281425c..5d680d10472 100644
--- a/library/coretests/tests/tuple.rs
+++ b/library/coretests/tests/tuple.rs
@@ -37,7 +37,7 @@ fn test_partial_ord() {
     assert!(!((1.0f64, 2.0f64) <= (f64::NAN, 3.0)));
     assert!(!((1.0f64, 2.0f64) > (f64::NAN, 3.0)));
     assert!(!((1.0f64, 2.0f64) >= (f64::NAN, 3.0)));
-    assert!(((1.0f64, 2.0f64) < (2.0, f64::NAN)));
+    assert!((1.0f64, 2.0f64) < (2.0, f64::NAN));
     assert!(!((2.0f64, 2.0f64) < (2.0, f64::NAN)));
 }
 
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index edbdd041145..15a7a770d1a 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -135,6 +135,8 @@ use crate::ops::Index;
 /// ]);
 /// ```
 ///
+/// ## `Entry` API
+///
 /// `HashMap` implements an [`Entry` API](#method.entry), which allows
 /// for complex methods of getting, setting, updating and removing keys and
 /// their values:
@@ -167,6 +169,8 @@ use crate::ops::Index;
 /// player_stats.entry("mana").and_modify(|mana| *mana += 200).or_insert(100);
 /// ```
 ///
+/// ## Usage with custom key types
+///
 /// The easiest way to use `HashMap` with a custom key type is to derive [`Eq`] and [`Hash`].
 /// We must also derive [`PartialEq`].
 ///
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index 72ad7c244ee..a220a3f56e9 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -814,7 +814,7 @@ impl File {
     ///
     /// If this file handle/descriptor, or a clone of it, already holds a lock, the exact behavior
     /// is unspecified and platform dependent, including the possibility that it will deadlock.
-    /// However, if this method returns `Ok(true)`, then it has acquired an exclusive lock.
+    /// However, if this method returns `Ok(())`, then it has acquired an exclusive lock.
     ///
     /// If the file is not open for writing, it is unspecified whether this function returns an error.
     ///
@@ -879,7 +879,7 @@ impl File {
     ///
     /// If this file handle, or a clone of it, already holds a lock, the exact behavior is
     /// unspecified and platform dependent, including the possibility that it will deadlock.
-    /// However, if this method returns `Ok(true)`, then it has acquired a shared lock.
+    /// However, if this method returns `Ok(())`, then it has acquired a shared lock.
     ///
     /// The lock will be released when this file (along with any other file descriptors/handles
     /// duplicated or inherited from it) is closed, or if the [`unlock`] method is called.
@@ -1111,6 +1111,11 @@ impl File {
     /// `futimes` on macOS before 10.13) and the `SetFileTime` function on Windows. Note that this
     /// [may change in the future][changes].
     ///
+    /// On most platforms, including UNIX and Windows, this function can also change the
+    /// timestamps of a directory. To get a `File` representing a directory in order to call
+    /// `set_times`, open the directory with `File::open` without attempting to obtain write
+    /// permission.
+    ///
     /// [changes]: io#platform-specific-behavior
     ///
     /// # Errors
@@ -1128,7 +1133,7 @@ impl File {
     ///     use std::fs::{self, File, FileTimes};
     ///
     ///     let src = fs::metadata("src")?;
-    ///     let dest = File::options().write(true).open("dest")?;
+    ///     let dest = File::open("dest")?;
     ///     let times = FileTimes::new()
     ///         .set_accessed(src.accessed()?)
     ///         .set_modified(src.modified()?);
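
A sketch of the directory case called out in the new `set_times` paragraph; the paths are placeholders, and this mirrors (but is not) the doctest above:

```rust
use std::fs::{self, File, FileTimes};
use std::io;

fn copy_dir_times(reference: &str, dir: &str) -> io::Result<()> {
    let meta = fs::metadata(reference)?;
    // As documented above, a read-only handle is enough to call `set_times`
    // on a directory on most platforms.
    let dir = File::open(dir)?;
    let times = FileTimes::new()
        .set_accessed(meta.accessed()?)
        .set_modified(meta.modified()?);
    dir.set_times(times)
}

fn main() -> io::Result<()> {
    copy_dir_times("src", "target_dir")
}
```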
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index fd06a3b540c..07b38c65898 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -281,9 +281,11 @@
 #![feature(cfg_target_thread_local)]
 #![feature(cfi_encoding)]
 #![feature(char_max_len)]
+#![feature(const_trait_impl)]
 #![feature(core_float_math)]
 #![feature(decl_macro)]
 #![feature(deprecated_suggestion)]
+#![feature(derive_const)]
 #![feature(doc_cfg)]
 #![feature(doc_cfg_hide)]
 #![feature(doc_masked)]
@@ -321,15 +323,19 @@
 #![feature(try_blocks)]
 #![feature(try_trait_v2)]
 #![feature(type_alias_impl_trait)]
-#![feature(unsigned_signed_diff)]
 // tidy-alphabetical-end
 //
 // Library features (core):
 // tidy-alphabetical-start
 #![feature(bstr)]
 #![feature(bstr_internals)]
+#![feature(cast_maybe_uninit)]
 #![feature(char_internals)]
 #![feature(clone_to_uninit)]
+#![feature(const_cmp)]
+#![feature(const_ops)]
+#![feature(const_option_ops)]
+#![feature(const_try)]
 #![feature(core_intrinsics)]
 #![feature(core_io_borrowed_buf)]
 #![feature(drop_guard)]
diff --git a/library/std/src/num/f32.rs b/library/std/src/num/f32.rs
index 2bff73add33..5dee68ad909 100644
--- a/library/std/src/num/f32.rs
+++ b/library/std/src/num/f32.rs
@@ -44,7 +44,7 @@ impl f32 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn floor(self) -> f32 {
         core::f32::math::floor(self)
@@ -67,7 +67,7 @@ impl f32 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn ceil(self) -> f32 {
         core::f32::math::ceil(self)
@@ -96,7 +96,7 @@ impl f32 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn round(self) -> f32 {
         core::f32::math::round(self)
@@ -123,7 +123,7 @@ impl f32 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "round_ties_even", since = "1.77.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn round_ties_even(self) -> f32 {
         core::f32::math::round_ties_even(self)
@@ -149,7 +149,7 @@ impl f32 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn trunc(self) -> f32 {
         core::f32::math::trunc(self)
@@ -173,7 +173,7 @@ impl f32 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn fract(self) -> f32 {
         core::f32::math::fract(self)
diff --git a/library/std/src/num/f64.rs b/library/std/src/num/f64.rs
index b71e319f407..3ec80f68bdb 100644
--- a/library/std/src/num/f64.rs
+++ b/library/std/src/num/f64.rs
@@ -44,7 +44,7 @@ impl f64 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn floor(self) -> f64 {
         core::f64::math::floor(self)
@@ -67,7 +67,7 @@ impl f64 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn ceil(self) -> f64 {
         core::f64::math::ceil(self)
@@ -96,7 +96,7 @@ impl f64 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn round(self) -> f64 {
         core::f64::math::round(self)
@@ -123,7 +123,7 @@ impl f64 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "round_ties_even", since = "1.77.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn round_ties_even(self) -> f64 {
         core::f64::math::round_ties_even(self)
@@ -149,7 +149,7 @@ impl f64 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn trunc(self) -> f64 {
         core::f64::math::trunc(self)
@@ -173,7 +173,7 @@ impl f64 {
     #[rustc_allow_incoherent_impl]
     #[must_use = "method returns a new number and does not mutate the original value"]
     #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_const_stable(feature = "const_float_round_methods", since = "CURRENT_RUSTC_VERSION")]
+    #[rustc_const_stable(feature = "const_float_round_methods", since = "1.90.0")]
     #[inline]
     pub const fn fract(self) -> f64 {
         core::f64::math::fract(self)
diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
index 913ef72f674..5e8d2f8e78e 100644
--- a/library/std/src/panic.rs
+++ b/library/std/src/panic.rs
@@ -60,6 +60,7 @@ impl<'a> PanicHookInfo<'a> {
     /// Returns the payload associated with the panic.
     ///
     /// This will commonly, but not always, be a `&'static str` or [`String`].
+    /// If you only care about such payloads, use [`payload_as_str`] instead.
     ///
     /// An invocation of the `panic!()` macro in Rust 2021 or later will always result in a
     /// panic payload of type `&'static str` or `String`.
@@ -69,6 +70,7 @@ impl<'a> PanicHookInfo<'a> {
     /// can result in a panic payload other than a `&'static str` or `String`.
     ///
     /// [`String`]: ../../std/string/struct.String.html
+    /// [`payload_as_str`]: PanicHookInfo::payload_as_str
     ///
     /// # Examples
     ///
@@ -108,8 +110,6 @@ impl<'a> PanicHookInfo<'a> {
     /// # Example
     ///
     /// ```should_panic
-    /// #![feature(panic_payload_as_str)]
-    ///
     /// std::panic::set_hook(Box::new(|panic_info| {
     ///     if let Some(s) = panic_info.payload_as_str() {
     ///         println!("panic occurred: {s:?}");
@@ -122,7 +122,7 @@ impl<'a> PanicHookInfo<'a> {
     /// ```
     #[must_use]
     #[inline]
-    #[unstable(feature = "panic_payload_as_str", issue = "125175")]
+    #[stable(feature = "panic_payload_as_str", since = "CURRENT_RUSTC_VERSION")]
     pub fn payload_as_str(&self) -> Option<&str> {
         if let Some(s) = self.payload.downcast_ref::<&str>() {
             Some(s)
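
A sketch of a panic hook using the newly stabilized `payload_as_str` (assumes a toolchain where this stabilization has landed; on older compilers the `panic_payload_as_str` feature gate is still required):

```rust
fn main() {
    std::panic::set_hook(Box::new(|info| {
        // Covers the common `&'static str` and `String` payloads without the
        // manual downcast chain shown in the method body above.
        match info.payload_as_str() {
            Some(msg) => eprintln!("panic occurred: {msg}"),
            None => eprintln!("panic occurred with a non-string payload"),
        }
    }));

    panic!("demo panic");
}
```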
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 224cd39855a..87a3fc80dfa 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -269,6 +269,7 @@ fn default_hook(info: &PanicHookInfo<'_>) {
 
         thread::with_current_name(|name| {
             let name = name.unwrap_or("<unnamed>");
+            let tid = thread::current_os_id();
 
             // Try to write the panic message to a buffer first to prevent other concurrent outputs
             // interleaving with it.
@@ -277,7 +278,7 @@ fn default_hook(info: &PanicHookInfo<'_>) {
 
             let write_msg = |dst: &mut dyn crate::io::Write| {
                 // We add a newline to ensure the panic message appears at the start of a line.
-                writeln!(dst, "\nthread '{name}' panicked at {location}:\n{msg}")
+                writeln!(dst, "\nthread '{name}' ({tid}) panicked at {location}:\n{msg}")
             };
 
             if write_msg(&mut cursor).is_ok() {
@@ -627,7 +628,7 @@ pub fn panicking() -> bool {
 /// Entry point of panics from the core crate (`panic_impl` lang item).
 #[cfg(not(any(test, doctest)))]
 #[panic_handler]
-pub fn begin_panic_handler(info: &core::panic::PanicInfo<'_>) -> ! {
+pub fn panic_handler(info: &core::panic::PanicInfo<'_>) -> ! {
     struct FormatStringPayload<'a> {
         inner: &'a core::panic::PanicMessage<'a>,
         string: Option<String>,
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 055e7f81480..3b52804d6be 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -2678,11 +2678,12 @@ impl Path {
     /// # Examples
     ///
     /// ```
-    /// # #![feature(path_file_prefix)]
     /// use std::path::Path;
     ///
     /// assert_eq!("foo", Path::new("foo.rs").file_prefix().unwrap());
     /// assert_eq!("foo", Path::new("foo.tar.gz").file_prefix().unwrap());
+    /// assert_eq!(".config", Path::new(".config").file_prefix().unwrap());
+    /// assert_eq!(".config", Path::new(".config.toml").file_prefix().unwrap());
     /// ```
     ///
     /// # See Also
@@ -2691,7 +2692,7 @@ impl Path {
     ///
     /// [`Path::file_stem`]: Path::file_stem
     ///
-    #[unstable(feature = "path_file_prefix", issue = "86319")]
+    #[stable(feature = "path_file_prefix", since = "CURRENT_RUSTC_VERSION")]
     #[must_use]
     pub fn file_prefix(&self) -> Option<&OsStr> {
         self.file_name().map(split_file_at_dot).and_then(|(before, _after)| Some(before))
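
A sketch contrasting the newly stabilized `file_prefix` with `file_stem` (again assuming a toolchain where `path_file_prefix` is stable):

```rust
use std::path::Path;

fn main() {
    // `file_stem` strips only the final extension; `file_prefix` strips
    // everything after the first non-leading `.`.
    assert_eq!("foo.tar", Path::new("foo.tar.gz").file_stem().unwrap());
    assert_eq!("foo", Path::new("foo.tar.gz").file_prefix().unwrap());

    // A leading dot is part of the name for both methods.
    assert_eq!(".config", Path::new(".config.toml").file_stem().unwrap());
    assert_eq!(".config", Path::new(".config.toml").file_prefix().unwrap());
}
```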
diff --git a/library/std/src/sys/args/common.rs b/library/std/src/sys/args/common.rs
index e787105a05a..33f3794ee63 100644
--- a/library/std/src/sys/args/common.rs
+++ b/library/std/src/sys/args/common.rs
@@ -12,7 +12,7 @@ impl !Sync for Args {}
 
 impl Args {
     #[inline]
-    pub(super) fn new(args: Vec<OsString>) -> Self {
+    pub fn new(args: Vec<OsString>) -> Self {
         Args { iter: args.into_iter() }
     }
 }
diff --git a/library/std/src/sys/configure_builtins.rs b/library/std/src/sys/configure_builtins.rs
new file mode 100644
index 00000000000..9d776b778dc
--- /dev/null
+++ b/library/std/src/sys/configure_builtins.rs
@@ -0,0 +1,22 @@
+/// Hook into .init_array to enable LSE atomic operations at startup, if
+/// supported.
+#[cfg(all(target_arch = "aarch64", target_os = "linux", not(feature = "compiler-builtins-c")))]
+#[used]
+#[unsafe(link_section = ".init_array.90")]
+static RUST_LSE_INIT: extern "C" fn() = {
+    extern "C" fn init_lse() {
+        use crate::arch;
+
+        // This is provided by compiler-builtins::aarch64_linux.
+        unsafe extern "C" {
+            fn __rust_enable_lse();
+        }
+
+        if arch::is_aarch64_feature_detected!("lse") {
+            unsafe {
+                __rust_enable_lse();
+            }
+        }
+    }
+    init_lse
+};
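
A conceptual sketch of the runtime-dispatch idea behind `__rust_enable_lse` (hypothetical names; the real switch lives inside compiler-builtins, not in user code): a flag is flipped once from the `.init_array` hook above, and hot paths branch on it afterwards.

```rust
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};

// Flipped exactly once during startup, before user code runs.
static HAVE_LSE: AtomicBool = AtomicBool::new(false);

pub fn enable_lse() {
    HAVE_LSE.store(true, Ordering::Relaxed);
}

// Conceptual dispatch: take the cheap single-instruction path when the CPU
// supports it, otherwise fall back to a retry loop (what LL/SC expands to).
pub fn fetch_add(v: &AtomicU64, n: u64) -> u64 {
    if HAVE_LSE.load(Ordering::Relaxed) {
        v.fetch_add(n, Ordering::SeqCst)
    } else {
        let mut cur = v.load(Ordering::Relaxed);
        loop {
            match v.compare_exchange_weak(
                cur,
                cur.wrapping_add(n),
                Ordering::SeqCst,
                Ordering::Relaxed,
            ) {
                Ok(prev) => return prev,
                Err(actual) => cur = actual,
            }
        }
    }
}
```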
diff --git a/library/std/src/sys/fs/windows.rs b/library/std/src/sys/fs/windows.rs
index 9b674a25165..bb3e4bc30ca 100644
--- a/library/std/src/sys/fs/windows.rs
+++ b/library/std/src/sys/fs/windows.rs
@@ -80,7 +80,7 @@ pub struct OpenOptions {
     attributes: u32,
     share_mode: u32,
     security_qos_flags: u32,
-    security_attributes: *mut c::SECURITY_ATTRIBUTES,
+    inherit_handle: bool,
 }
 
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -203,7 +203,7 @@ impl OpenOptions {
             share_mode: c::FILE_SHARE_READ | c::FILE_SHARE_WRITE | c::FILE_SHARE_DELETE,
             attributes: 0,
             security_qos_flags: 0,
-            security_attributes: ptr::null_mut(),
+            inherit_handle: false,
         }
     }
 
@@ -243,8 +243,8 @@ impl OpenOptions {
         // receive is `SECURITY_ANONYMOUS = 0x0`, which we can't check for later on.
         self.security_qos_flags = flags | c::SECURITY_SQOS_PRESENT;
     }
-    pub fn security_attributes(&mut self, attrs: *mut c::SECURITY_ATTRIBUTES) {
-        self.security_attributes = attrs;
+    pub fn inherit_handle(&mut self, inherit: bool) {
+        self.inherit_handle = inherit;
     }
 
     fn get_access_mode(&self) -> io::Result<u32> {
@@ -307,12 +307,17 @@ impl File {
 
     fn open_native(path: &WCStr, opts: &OpenOptions) -> io::Result<File> {
         let creation = opts.get_creation_mode()?;
+        let sa = c::SECURITY_ATTRIBUTES {
+            nLength: size_of::<c::SECURITY_ATTRIBUTES>() as u32,
+            lpSecurityDescriptor: ptr::null_mut(),
+            bInheritHandle: opts.inherit_handle as c::BOOL,
+        };
         let handle = unsafe {
             c::CreateFileW(
                 path.as_ptr(),
                 opts.get_access_mode()?,
                 opts.share_mode,
-                opts.security_attributes,
+                if opts.inherit_handle { &sa } else { ptr::null() },
                 creation,
                 opts.get_flags_and_attributes(),
                 ptr::null_mut(),
@@ -1601,7 +1606,7 @@ pub fn junction_point(original: &Path, link: &Path) -> io::Result<()> {
     };
     unsafe {
         let ptr = header.PathBuffer.as_mut_ptr();
-        ptr.copy_from(abs_path.as_ptr().cast::<MaybeUninit<u16>>(), abs_path.len());
+        ptr.copy_from(abs_path.as_ptr().cast_uninit(), abs_path.len());
 
         let mut ret = 0;
         cvt(c::DeviceIoControl(
diff --git a/library/std/src/sys/io/io_slice/uefi.rs b/library/std/src/sys/io/io_slice/uefi.rs
new file mode 100644
index 00000000000..909cfbea0b7
--- /dev/null
+++ b/library/std/src/sys/io/io_slice/uefi.rs
@@ -0,0 +1,74 @@
+//! A buffer type used with `Write::write_vectored` for UEFI Networking APIs. Vectored writing to
+//! File is not supported as of UEFI Spec 2.11.
+
+use crate::marker::PhantomData;
+use crate::slice;
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct IoSlice<'a> {
+    len: u32,
+    data: *const u8,
+    _p: PhantomData<&'a [u8]>,
+}
+
+impl<'a> IoSlice<'a> {
+    #[inline]
+    pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+        let len = buf.len().try_into().unwrap();
+        Self { len, data: buf.as_ptr(), _p: PhantomData }
+    }
+
+    #[inline]
+    pub fn advance(&mut self, n: usize) {
+        self.len = u32::try_from(n)
+            .ok()
+            .and_then(|n| self.len.checked_sub(n))
+            .expect("advancing IoSlice beyond its length");
+        unsafe { self.data = self.data.add(n) };
+    }
+
+    #[inline]
+    pub const fn as_slice(&self) -> &'a [u8] {
+        unsafe { slice::from_raw_parts(self.data, self.len as usize) }
+    }
+}
+
+#[repr(C)]
+pub struct IoSliceMut<'a> {
+    len: u32,
+    data: *mut u8,
+    _p: PhantomData<&'a mut [u8]>,
+}
+
+impl<'a> IoSliceMut<'a> {
+    #[inline]
+    pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+        let len = buf.len().try_into().unwrap();
+        Self { len, data: buf.as_mut_ptr(), _p: PhantomData }
+    }
+
+    #[inline]
+    pub fn advance(&mut self, n: usize) {
+        self.len = u32::try_from(n)
+            .ok()
+            .and_then(|n| self.len.checked_sub(n))
+            .expect("advancing IoSlice beyond its length");
+        unsafe { self.data = self.data.add(n) };
+    }
+
+    #[inline]
+    pub fn as_slice(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self.data, self.len as usize) }
+    }
+
+    #[inline]
+    pub const fn into_slice(self) -> &'a mut [u8] {
+        unsafe { slice::from_raw_parts_mut(self.data, self.len as usize) }
+    }
+
+    #[inline]
+    pub fn as_mut_slice(&mut self) -> &mut [u8] {
+        unsafe { slice::from_raw_parts_mut(self.data, self.len as usize) }
+    }
+}
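
For context, a sketch of the public API this backs: `write_vectored` gathers several buffers in one call, and on UEFI each `IoSlice` above is laid out to match the protocol fragment descriptors.

```rust
use std::io::{self, IoSlice, Write};

fn main() -> io::Result<()> {
    let bufs = [IoSlice::new(b"hello, "), IoSlice::new(b"vectored "), IoSlice::new(b"world\n")];
    // Writers may consume fewer bytes than offered; `write_all_vectored` is
    // still unstable, so a production caller would loop here.
    let written = io::stdout().write_vectored(&bufs)?;
    println!("wrote {written} bytes");
    Ok(())
}
```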
diff --git a/library/std/src/sys/io/mod.rs b/library/std/src/sys/io/mod.rs
index 4d0365d42fd..ae75f4d97b4 100644
--- a/library/std/src/sys/io/mod.rs
+++ b/library/std/src/sys/io/mod.rs
@@ -11,6 +11,9 @@ mod io_slice {
         } else if #[cfg(target_os = "wasi")] {
             mod wasi;
             pub use wasi::*;
+        } else if #[cfg(target_os = "uefi")] {
+            mod uefi;
+            pub use uefi::*;
         } else {
             mod unsupported;
             pub use unsupported::*;
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index f9a02b522e5..8ec0a0e3302 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -1,5 +1,10 @@
 #![allow(unsafe_op_in_unsafe_fn)]
 
+/// The configure_builtins module provides runtime support for compiler-builtins
+/// features that require dynamic initialization to work as expected, e.g. aarch64
+/// outline-atomics.
+mod configure_builtins;
+
 /// The PAL (platform abstraction layer) contains platform-specific abstractions
 /// for implementing the features in the other submodules, e.g. UNIX file
 /// descriptors.
diff --git a/library/std/src/sys/pal/hermit/thread.rs b/library/std/src/sys/pal/hermit/thread.rs
index 95fe4f902d3..cc4734b6819 100644
--- a/library/std/src/sys/pal/hermit/thread.rs
+++ b/library/std/src/sys/pal/hermit/thread.rs
@@ -115,6 +115,10 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     unsafe { Ok(NonZero::new_unchecked(hermit_abi::available_parallelism())) }
 }
diff --git a/library/std/src/sys/pal/hermit/time.rs b/library/std/src/sys/pal/hermit/time.rs
index f76a5f96c87..89a427ab88b 100644
--- a/library/std/src/sys/pal/hermit/time.rs
+++ b/library/std/src/sys/pal/hermit/time.rs
@@ -25,8 +25,15 @@ impl Timespec {
         Timespec { t: timespec { tv_sec, tv_nsec } }
     }
 
-    fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
-        if self >= other {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    const fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
+        // FIXME: const PartialOrd
+        let mut cmp = self.t.tv_sec - other.t.tv_sec;
+        if cmp == 0 {
+            cmp = self.t.tv_nsec as i64 - other.t.tv_nsec as i64;
+        }
+
+        if cmp >= 0 {
             Ok(if self.t.tv_nsec >= other.t.tv_nsec {
                 Duration::new(
                     (self.t.tv_sec - other.t.tv_sec) as u64,
@@ -46,20 +53,22 @@ impl Timespec {
         }
     }
 
-    fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    const fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
         let mut secs = self.t.tv_sec.checked_add_unsigned(other.as_secs())?;
 
         // Nano calculations can't overflow because nanos are <1B which fit
         // in a u32.
-        let mut nsec = other.subsec_nanos() + u32::try_from(self.t.tv_nsec).unwrap();
-        if nsec >= NSEC_PER_SEC.try_into().unwrap() {
-            nsec -= u32::try_from(NSEC_PER_SEC).unwrap();
+        let mut nsec = other.subsec_nanos() + self.t.tv_nsec as u32;
+        if nsec >= NSEC_PER_SEC as u32 {
+            nsec -= NSEC_PER_SEC as u32;
             secs = secs.checked_add(1)?;
         }
         Some(Timespec { t: timespec { tv_sec: secs, tv_nsec: nsec as _ } })
     }
 
-    fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    const fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
         let mut secs = self.t.tv_sec.checked_sub_unsigned(other.as_secs())?;
 
         // Similar to above, nanos can't overflow.
@@ -213,15 +222,18 @@ impl SystemTime {
         SystemTime(time)
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
         self.0.sub_timespec(&other.0)
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_add_duration(other)?))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub_duration(other)?))
     }
 }
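
A standalone sketch of the manual comparison these const conversions use while `PartialOrd` cannot be called in const fn (the helper name is illustrative):

```rust
// Compare (seconds, nanoseconds) pairs without PartialOrd: the seconds
// difference decides unless it is zero, then the nanoseconds break the tie.
const fn timespec_ge(a_sec: i64, a_nsec: i64, b_sec: i64, b_nsec: i64) -> bool {
    let mut cmp = a_sec - b_sec;
    if cmp == 0 {
        cmp = a_nsec - b_nsec;
    }
    cmp >= 0
}

// Evaluated at compile time; a failing assert! here is a compile error.
const _: () = {
    assert!(timespec_ge(5, 0, 4, 999_999_999));
    assert!(!timespec_ge(4, 1, 4, 2));
    assert!(timespec_ge(4, 2, 4, 2));
};

fn main() {}
```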
diff --git a/library/std/src/sys/pal/itron/thread.rs b/library/std/src/sys/pal/itron/thread.rs
index 0d28051fcc4..4e14cb3cbca 100644
--- a/library/std/src/sys/pal/itron/thread.rs
+++ b/library/std/src/sys/pal/itron/thread.rs
@@ -361,6 +361,10 @@ unsafe fn terminate_and_delete_current_task() -> ! {
     unsafe { crate::hint::unreachable_unchecked() };
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     super::unsupported()
 }
diff --git a/library/std/src/sys/pal/sgx/thread.rs b/library/std/src/sys/pal/sgx/thread.rs
index a236c362706..1f613badcd7 100644
--- a/library/std/src/sys/pal/sgx/thread.rs
+++ b/library/std/src/sys/pal/sgx/thread.rs
@@ -1,6 +1,6 @@
 #![cfg_attr(test, allow(dead_code))] // why is this necessary?
 
-use super::abi::usercalls;
+use super::abi::{thread, usercalls};
 use super::unsupported;
 use crate::ffi::CStr;
 use crate::io;
@@ -149,6 +149,10 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    Some(thread::current().addr().get() as u64)
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     unsupported()
 }
diff --git a/library/std/src/sys/pal/sgx/time.rs b/library/std/src/sys/pal/sgx/time.rs
index db4cf2804bf..603dae952ab 100644
--- a/library/std/src/sys/pal/sgx/time.rs
+++ b/library/std/src/sys/pal/sgx/time.rs
@@ -32,15 +32,22 @@ impl SystemTime {
         SystemTime(usercalls::insecure_time())
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
-        self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+        // FIXME: ok_or_else with const closures
+        match self.0.checked_sub(other.0) {
+            Some(duration) => Ok(duration),
+            None => Err(other.0 - self.0),
+        }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_add(*other)?))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub(*other)?))
     }
 }
diff --git a/library/std/src/sys/pal/solid/time.rs b/library/std/src/sys/pal/solid/time.rs
index c39d715c6a6..e35e60df1a0 100644
--- a/library/std/src/sys/pal/solid/time.rs
+++ b/library/std/src/sys/pal/solid/time.rs
@@ -39,7 +39,8 @@ impl SystemTime {
         Self(t)
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
         if self.0 >= other.0 {
             Ok(Duration::from_secs((self.0 as u64).wrapping_sub(other.0 as u64)))
         } else {
@@ -47,11 +48,13 @@ impl SystemTime {
         }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_add_unsigned(other.as_secs())?))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub_unsigned(other.as_secs())?))
     }
 }
diff --git a/library/std/src/sys/pal/teeos/thread.rs b/library/std/src/sys/pal/teeos/thread.rs
index a91d95626e7..1812d11e692 100644
--- a/library/std/src/sys/pal/teeos/thread.rs
+++ b/library/std/src/sys/pal/teeos/thread.rs
@@ -144,6 +144,10 @@ impl Drop for Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 // Note: Both `sched_getaffinity` and `sysconf` are available but not functional on
 // teeos, so this function always returns an Error!
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
diff --git a/library/std/src/sys/pal/uefi/tests.rs b/library/std/src/sys/pal/uefi/tests.rs
index 38658cc4e9a..56ca999cc7e 100644
--- a/library/std/src/sys/pal/uefi/tests.rs
+++ b/library/std/src/sys/pal/uefi/tests.rs
@@ -1,7 +1,13 @@
+//! These tests are not run automatically right now. Please run these tests manually by copying them
+//! to a separate project when modifying any related code.
+
 use super::alloc::*;
-use super::time::*;
+use super::time::system_time_internal::{from_uefi, to_uefi};
+use crate::io::{IoSlice, IoSliceMut};
 use crate::time::Duration;
 
+const SECS_IN_MINUTE: u64 = 60;
+
 #[test]
 fn align() {
     // UEFI ABI specifies that allocation alignment minimum is always 8. So this can be
@@ -23,19 +29,177 @@ fn align() {
 }
 
 #[test]
-fn epoch() {
-    let t = r_efi::system::Time {
-        year: 1970,
+fn systemtime_start() {
+    let t = r_efi::efi::Time {
+        year: 1900,
         month: 1,
         day: 1,
         hour: 0,
         minute: 0,
         second: 0,
         nanosecond: 0,
-        timezone: r_efi::efi::UNSPECIFIED_TIMEZONE,
+        timezone: -1440,
         daylight: 0,
+        pad2: 0,
+    };
+    assert_eq!(from_uefi(&t), Duration::new(0, 0));
+    assert_eq!(t, to_uefi(&from_uefi(&t), -1440, 0).unwrap());
+    assert!(to_uefi(&from_uefi(&t), 0, 0).is_none());
+}
+
+#[test]
+fn systemtime_utc_start() {
+    let t = r_efi::efi::Time {
+        year: 1900,
+        month: 1,
+        day: 1,
+        hour: 0,
+        minute: 0,
+        second: 0,
         pad1: 0,
+        nanosecond: 0,
+        timezone: 0,
+        daylight: 0,
         pad2: 0,
     };
-    assert_eq!(system_time_internal::uefi_time_to_duration(t), Duration::new(0, 0));
+    assert_eq!(from_uefi(&t), Duration::new(1440 * SECS_IN_MINUTE, 0));
+    assert_eq!(t, to_uefi(&from_uefi(&t), 0, 0).unwrap());
+    assert!(to_uefi(&from_uefi(&t), -1440, 0).is_some());
+}
+
+#[test]
+fn systemtime_end() {
+    let t = r_efi::efi::Time {
+        year: 9999,
+        month: 12,
+        day: 31,
+        hour: 23,
+        minute: 59,
+        second: 59,
+        pad1: 0,
+        nanosecond: 0,
+        timezone: 1440,
+        daylight: 0,
+        pad2: 0,
+    };
+    assert!(to_uefi(&from_uefi(&t), 1440, 0).is_some());
+    assert!(to_uefi(&from_uefi(&t), 1439, 0).is_none());
+}
+
+// UEFI IoSlice and IoSliceMut Tests
+//
+// Strictly speaking, vectored read/write types for UDP4, UDP6, TCP4, TCP6 are defined
+// separately in the UEFI Spec. However, they have the same signature. These tests just ensure
+// that `IoSlice` and `IoSliceMut` are compatible with the vectored types for all the
+// networking protocols.
+
+unsafe fn to_slice<T>(val: &T) -> &[u8] {
+    let len = size_of_val(val);
+    unsafe { crate::slice::from_raw_parts(crate::ptr::from_ref(val).cast(), len) }
+}
+
+#[test]
+fn io_slice_single() {
+    let mut data = [0, 1, 2, 3, 4];
+
+    let tcp4_frag = r_efi::protocols::tcp4::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let tcp6_frag = r_efi::protocols::tcp6::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let udp4_frag = r_efi::protocols::udp4::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let udp6_frag = r_efi::protocols::udp6::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let io_slice = IoSlice::new(&data);
+
+    unsafe {
+        assert_eq!(to_slice(&io_slice), to_slice(&tcp4_frag));
+        assert_eq!(to_slice(&io_slice), to_slice(&tcp6_frag));
+        assert_eq!(to_slice(&io_slice), to_slice(&udp4_frag));
+        assert_eq!(to_slice(&io_slice), to_slice(&udp6_frag));
+    }
+}
+
+#[test]
+fn io_slice_mut_single() {
+    let mut data = [0, 1, 2, 3, 4];
+
+    let tcp4_frag = r_efi::protocols::tcp4::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let tcp6_frag = r_efi::protocols::tcp6::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let udp4_frag = r_efi::protocols::udp4::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let udp6_frag = r_efi::protocols::udp6::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let io_slice_mut = IoSliceMut::new(&mut data);
+
+    unsafe {
+        assert_eq!(to_slice(&io_slice_mut), to_slice(&tcp4_frag));
+        assert_eq!(to_slice(&io_slice_mut), to_slice(&tcp6_frag));
+        assert_eq!(to_slice(&io_slice_mut), to_slice(&udp4_frag));
+        assert_eq!(to_slice(&io_slice_mut), to_slice(&udp6_frag));
+    }
+}
+
+#[test]
+fn io_slice_multi() {
+    let mut data = [0, 1, 2, 3, 4];
+
+    let tcp4_frag = r_efi::protocols::tcp4::FragmentData {
+        fragment_length: data.len().try_into().unwrap(),
+        fragment_buffer: data.as_mut_ptr().cast(),
+    };
+    let rhs =
+        [tcp4_frag.clone(), tcp4_frag.clone(), tcp4_frag.clone(), tcp4_frag.clone(), tcp4_frag];
+    let lhs = [
+        IoSlice::new(&data),
+        IoSlice::new(&data),
+        IoSlice::new(&data),
+        IoSlice::new(&data),
+        IoSlice::new(&data),
+    ];
+
+    unsafe {
+        assert_eq!(to_slice(&lhs), to_slice(&rhs));
+    }
+}
+
+#[test]
+fn io_slice_basic() {
+    let data = [0, 1, 2, 3, 4];
+    let mut io_slice = IoSlice::new(&data);
+
+    assert_eq!(data, io_slice.as_slice());
+    io_slice.advance(2);
+    assert_eq!(&data[2..], io_slice.as_slice());
+}
+
+#[test]
+fn io_slice_mut_basic() {
+    let data = [0, 1, 2, 3, 4];
+    let mut data_clone = [0, 1, 2, 3, 4];
+    let mut io_slice_mut = IoSliceMut::new(&mut data_clone);
+
+    assert_eq!(data, io_slice_mut.as_slice());
+    assert_eq!(data, io_slice_mut.as_mut_slice());
+
+    io_slice_mut.advance(2);
+    assert_eq!(&data[2..], io_slice_mut.into_slice());
 }
diff --git a/library/std/src/sys/pal/uefi/thread.rs b/library/std/src/sys/pal/uefi/thread.rs
index 75c364362b2..47a48008c76 100644
--- a/library/std/src/sys/pal/uefi/thread.rs
+++ b/library/std/src/sys/pal/uefi/thread.rs
@@ -56,6 +56,10 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     // UEFI is single threaded
     Ok(NonZero::new(1).unwrap())
diff --git a/library/std/src/sys/pal/uefi/time.rs b/library/std/src/sys/pal/uefi/time.rs
index eeb2c35ffbb..df5611b2ddd 100644
--- a/library/std/src/sys/pal/uefi/time.rs
+++ b/library/std/src/sys/pal/uefi/time.rs
@@ -1,16 +1,42 @@
 use crate::time::Duration;
 
-const SECS_IN_MINUTE: u64 = 60;
-const SECS_IN_HOUR: u64 = SECS_IN_MINUTE * 60;
-const SECS_IN_DAY: u64 = SECS_IN_HOUR * 24;
-
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
 pub struct Instant(Duration);
 
+/// When a timezone is specified, the stored Duration is in UTC. If the timezone is unspecified,
+/// the time is assumed to already be in UTC.
+///
+/// UEFI SystemTime is stored as a Duration from 1900-01-01-00:00:00 with timezone -1440 as the
+/// anchor.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
 pub struct SystemTime(Duration);
 
-pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
+pub const UNIX_EPOCH: SystemTime = SystemTime::from_uefi(r_efi::efi::Time {
+    year: 1970,
+    month: 1,
+    day: 1,
+    hour: 0,
+    minute: 0,
+    second: 0,
+    nanosecond: 0,
+    timezone: 0,
+    daylight: 0,
+    pad1: 0,
+    pad2: 0,
+});
+
+const MAX_UEFI_TIME: SystemTime = SystemTime::from_uefi(r_efi::efi::Time {
+    year: 9999,
+    month: 12,
+    day: 31,
+    hour: 23,
+    minute: 59,
+    second: 59,
+    nanosecond: 999_999_999,
+    timezone: 1440,
+    daylight: 0,
+    pad1: 0,
+    pad2: 0,
+});
 
 impl Instant {
     pub fn now() -> Instant {
@@ -40,20 +66,45 @@ impl Instant {
 }
 
 impl SystemTime {
+    pub(crate) const fn from_uefi(t: r_efi::efi::Time) -> Self {
+        Self(system_time_internal::from_uefi(&t))
+    }
+
+    #[expect(dead_code)]
+    pub(crate) const fn to_uefi(self, timezone: i16, daylight: u8) -> Option<r_efi::efi::Time> {
+        system_time_internal::to_uefi(&self.0, timezone, daylight)
+    }
+
     pub fn now() -> SystemTime {
         system_time_internal::now()
             .unwrap_or_else(|| panic!("time not implemented on this platform"))
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
-        self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+        // FIXME: ok_or_else with const closures
+        match self.0.checked_sub(other.0) {
+            Some(duration) => Ok(duration),
+            None => Err(other.0 - self.0),
+        }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
-        Some(SystemTime(self.0.checked_add(*other)?))
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+        let temp = self.0.checked_add(*other)?;
+
+        // Check if can be represented in UEFI
+        // FIXME: const PartialOrd
+        let mut cmp = temp.as_secs() - MAX_UEFI_TIME.0.as_secs();
+        if cmp == 0 {
+            cmp = temp.subsec_nanos() as u64 - MAX_UEFI_TIME.0.subsec_nanos() as u64;
+        }
+
+        if cmp <= 0 { Some(SystemTime(temp)) } else { None }
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub(*other)?))
     }
 }
@@ -66,51 +117,132 @@ pub(crate) mod system_time_internal {
     use crate::mem::MaybeUninit;
     use crate::ptr::NonNull;
 
+    const SECS_IN_MINUTE: u64 = 60;
+    const SECS_IN_HOUR: u64 = SECS_IN_MINUTE * 60;
+    const SECS_IN_DAY: u64 = SECS_IN_HOUR * 24;
+    const TIMEZONE_DELTA: u64 = 1440 * SECS_IN_MINUTE;
+
     pub fn now() -> Option<SystemTime> {
         let runtime_services: NonNull<RuntimeServices> = helpers::runtime_services()?;
         let mut t: MaybeUninit<Time> = MaybeUninit::uninit();
         let r = unsafe {
             ((*runtime_services.as_ptr()).get_time)(t.as_mut_ptr(), crate::ptr::null_mut())
         };
-
         if r.is_error() {
             return None;
         }
 
         let t = unsafe { t.assume_init() };
 
-        Some(SystemTime(uefi_time_to_duration(t)))
+        Some(SystemTime::from_uefi(t))
     }
 
-    // This algorithm is based on the one described in the post
-    // https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html
-    pub(crate) const fn uefi_time_to_duration(t: r_efi::system::Time) -> Duration {
-        assert!(t.month <= 12);
-        assert!(t.month != 0);
+    /// This algorithm is a modified form of the one described in the post
+    /// https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html
+    ///
+    /// The changes are to use 1900-01-01-00:00:00 with timezone -1440 as the anchor instead of the
+    /// UNIX epoch used in the original algorithm.
+    pub(crate) const fn from_uefi(t: &Time) -> Duration {
+        assert!(t.month <= 12 && t.month != 0);
+        assert!(t.year >= 1900 && t.year <= 9999);
+        assert!(t.day <= 31 && t.day != 0);
+
+        assert!(t.second < 60);
+        assert!(t.minute < 60);
+        assert!(t.hour < 24);
+        assert!(t.nanosecond < 1_000_000_000);
+
+        assert!(
+            (t.timezone <= 1440 && t.timezone >= -1440)
+                || t.timezone == r_efi::efi::UNSPECIFIED_TIMEZONE
+        );
 
         const YEAR_BASE: u32 = 4800; /* Before min year, multiple of 400. */
 
-        // Calculate the number of days since 1/1/1970
+        // Calculate the number of days since 1/1/1900. This is the earliest supported date in UEFI
+        // time.
         // Use 1 March as the start
         let (m_adj, overflow): (u32, bool) = (t.month as u32).overflowing_sub(3);
         let (carry, adjust): (u32, u32) = if overflow { (1, 12) } else { (0, 0) };
         let y_adj: u32 = (t.year as u32) + YEAR_BASE - carry;
         let month_days: u32 = (m_adj.wrapping_add(adjust) * 62719 + 769) / 2048;
         let leap_days: u32 = y_adj / 4 - y_adj / 100 + y_adj / 400;
-        let days: u32 = y_adj * 365 + leap_days + month_days + (t.day as u32 - 1) - 2472632;
+        let days: u32 = y_adj * 365 + leap_days + month_days + (t.day as u32 - 1) - 2447065;
 
         let localtime_epoch: u64 = (days as u64) * SECS_IN_DAY
             + (t.second as u64)
             + (t.minute as u64) * SECS_IN_MINUTE
             + (t.hour as u64) * SECS_IN_HOUR;
 
-        let utc_epoch: u64 = if t.timezone == r_efi::efi::UNSPECIFIED_TIMEZONE {
-            localtime_epoch
+        // Calculate the offset from 1/1/1900 at timezone -1440 min
+        let adjusted_localtime_epoc: u64 = localtime_epoch + TIMEZONE_DELTA;
+
+        let epoch: u64 = if t.timezone == r_efi::efi::UNSPECIFIED_TIMEZONE {
+            adjusted_localtime_epoc
         } else {
-            (localtime_epoch as i64 + (t.timezone as i64) * SECS_IN_MINUTE as i64) as u64
+            adjusted_localtime_epoc
+                .checked_add_signed((t.timezone as i64) * SECS_IN_MINUTE as i64)
+                .unwrap()
         };
 
-        Duration::new(utc_epoch, t.nanosecond)
+        Duration::new(epoch, t.nanosecond)
+    }
+
+    /// This algorithm is a modified version of the one described in the post:
+    /// https://howardhinnant.github.io/date_algorithms.html#clive_from_days
+    ///
+    /// The changes are to use 1900-01-01-00:00:00 with timezone -1440 as the anchor instead of the
+    /// UNIX epoch used in the original algorithm.
+    pub(crate) const fn to_uefi(dur: &Duration, timezone: i16, daylight: u8) -> Option<Time> {
+        // Check timezone validity
+        assert!(timezone <= 1440 && timezone >= -1440);
+
+        // FIXME(#126043): use checked_sub_signed once stabilized
+        let secs =
+            dur.as_secs().checked_add_signed((-timezone as i64) * SECS_IN_MINUTE as i64).unwrap();
+
+        // Convert to seconds since 1900-01-01-00:00:00 in the given timezone.
+        let Some(secs) = secs.checked_sub(TIMEZONE_DELTA) else { return None };
+
+        let days = secs / SECS_IN_DAY;
+        let remaining_secs = secs % SECS_IN_DAY;
+
+        let z = days + 693901;
+        let era = z / 146097;
+        let doe = z - (era * 146097);
+        let yoe = (doe - doe / 1460 + doe / 36524 - doe / 146096) / 365;
+        let mut y = yoe + era * 400;
+        let doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
+        let mp = (5 * doy + 2) / 153;
+        let d = doy - (153 * mp + 2) / 5 + 1;
+        let m = if mp < 10 { mp + 3 } else { mp - 9 };
+
+        if m <= 2 {
+            y += 1;
+        }
+
+        let hour = (remaining_secs / SECS_IN_HOUR) as u8;
+        let minute = ((remaining_secs % SECS_IN_HOUR) / SECS_IN_MINUTE) as u8;
+        let second = (remaining_secs % SECS_IN_MINUTE) as u8;
+
+        // Check Bounds
+        if y >= 1900 && y <= 9999 {
+            Some(Time {
+                year: y as u16,
+                month: m as u8,
+                day: d as u8,
+                hour,
+                minute,
+                second,
+                nanosecond: dur.subsec_nanos(),
+                timezone,
+                daylight,
+                pad1: 0,
+                pad2: 0,
+            })
+        } else {
+            None
+        }
     }
 }
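
An informal arithmetic check of the 1900-01-01 / timezone -1440 anchor used by `from_uefi` above (back-of-the-envelope numbers, not the library's code):

```rust
fn main() {
    // 1900..1970 spans 70 years with 17 leap days (1904, 1908, ..., 1968).
    let days_1900_to_1970: u64 = 70 * 365 + 17;
    assert_eq!(days_1900_to_1970, 25_567);

    // from_uefi adds a fixed 1440-minute delta and then the timezone offset,
    // so the anchor itself (1900-01-01 00:00 at timezone -1440) maps to 0,
    // and the UTC UNIX epoch maps to:
    let unix_epoch_secs = days_1900_to_1970 * 86_400 + 1_440 * 60;
    assert_eq!(unix_epoch_secs, 2_209_075_200);
}
```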
 
diff --git a/library/std/src/sys/pal/unix/stack_overflow.rs b/library/std/src/sys/pal/unix/stack_overflow.rs
index d89100e6919..0d2100d66bc 100644
--- a/library/std/src/sys/pal/unix/stack_overflow.rs
+++ b/library/std/src/sys/pal/unix/stack_overflow.rs
@@ -119,7 +119,8 @@ mod imp {
                     && thread_info.guard_page_range.contains(&fault_addr)
                 {
                     let name = thread_info.thread_name.as_deref().unwrap_or("<unknown>");
-                    rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
+                    let tid = crate::thread::current_os_id();
+                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                     rtabort!("stack overflow");
                 }
             })
@@ -696,7 +697,8 @@ mod imp {
             if code == c::EXCEPTION_STACK_OVERFLOW {
                 crate::thread::with_current_name(|name| {
                     let name = name.unwrap_or("<unknown>");
-                    rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
+                    let tid = crate::thread::current_os_id();
+                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                 });
             }
             c::EXCEPTION_CONTINUE_SEARCH
diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs
index 7f6440152d4..36e53e7cadc 100644
--- a/library/std/src/sys/pal/unix/thread.rs
+++ b/library/std/src/sys/pal/unix/thread.rs
@@ -398,6 +398,62 @@ impl Drop for Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    // Most Unix platforms have a way to query an integer ID of the current thread, all with
+    // slightly different spellings.
+    //
+    // The OS thread ID is used rather than `pthread_self` so as to match what will be displayed
+    // for process inspection (debuggers, trace, `top`, etc.).
+    cfg_if::cfg_if! {
+        // Most platforms have a function returning a `pid_t` or int, which is an `i32`.
+        if #[cfg(any(target_os = "android", target_os = "linux"))] {
+            use crate::sys::weak::syscall;
+
+            // `libc::gettid` is only available on glibc 2.30+, but the syscall is available
+            // since Linux 2.4.11.
+            syscall!(fn gettid() -> libc::pid_t;);
+
+            // SAFETY: FFI call with no preconditions.
+            let id: libc::pid_t = unsafe { gettid() };
+            Some(id as u64)
+        } else if #[cfg(target_os = "nto")] {
+            // SAFETY: FFI call with no preconditions.
+            let id: libc::pid_t = unsafe { libc::gettid() };
+            Some(id as u64)
+        } else if #[cfg(target_os = "openbsd")] {
+            // SAFETY: FFI call with no preconditions.
+            let id: libc::pid_t = unsafe { libc::getthrid() };
+            Some(id as u64)
+        } else if #[cfg(target_os = "freebsd")] {
+            // SAFETY: FFI call with no preconditions.
+            let id: libc::c_int = unsafe { libc::pthread_getthreadid_np() };
+            Some(id as u64)
+        } else if #[cfg(target_os = "netbsd")] {
+            // SAFETY: FFI call with no preconditions.
+            let id: libc::lwpid_t = unsafe { libc::_lwp_self() };
+            Some(id as u64)
+        } else if #[cfg(any(target_os = "illumos", target_os = "solaris"))] {
+            // On Illumos and Solaris, the `pthread_t` is the same as the OS thread ID.
+            // SAFETY: FFI call with no preconditions.
+            let id: libc::pthread_t = unsafe { libc::pthread_self() };
+            Some(id as u64)
+        } else if #[cfg(target_vendor = "apple")] {
+            // Apple allows querying arbitrary thread IDs, `thread=NULL` queries the current thread.
+            let mut id = 0u64;
+            // SAFETY: `thread_id` is a valid pointer, no other preconditions.
+            let status: libc::c_int = unsafe { libc::pthread_threadid_np(0, &mut id) };
+            if status == 0 {
+                Some(id)
+            } else {
+                None
+            }
+        } else {
+            // Other platforms don't have an OS thread ID or don't have a way to access it.
+            None
+        }
+    }
+}
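
A small user-space sketch of the Linux branch above (assumes the `libc` crate and glibc 2.30+, where `libc::gettid` is exported); the kernel TID is what `top -H`, gdb, and tracing tools display, unlike the opaque `pthread_t`:

```rust
#[cfg(target_os = "linux")]
fn current_tid() -> u64 {
    // SAFETY: gettid has no preconditions and cannot fail.
    let tid: libc::pid_t = unsafe { libc::gettid() };
    tid as u64
}

#[cfg(target_os = "linux")]
fn main() {
    println!("running on kernel thread {}", current_tid());
}

#[cfg(not(target_os = "linux"))]
fn main() {}
```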
+
 #[cfg(any(
     target_os = "linux",
     target_os = "nto",
diff --git a/library/std/src/sys/pal/unix/time.rs b/library/std/src/sys/pal/unix/time.rs
index bd7f74fea6a..328fe0bc960 100644
--- a/library/std/src/sys/pal/unix/time.rs
+++ b/library/std/src/sys/pal/unix/time.rs
@@ -38,15 +38,18 @@ impl SystemTime {
         SystemTime { t: Timespec::now(libc::CLOCK_REALTIME) }
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
         self.t.sub_timespec(&other.t)
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime { t: self.t.checked_add_duration(other)? })
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime { t: self.t.checked_sub_duration(other)? })
     }
 }
@@ -133,8 +136,15 @@ impl Timespec {
         Timespec::new(t.tv_sec as i64, t.tv_nsec as i64).unwrap()
     }
 
-    pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
-        if self >= other {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
+        // FIXME: const PartialOrd
+        let mut cmp = self.tv_sec - other.tv_sec;
+        if cmp == 0 {
+            cmp = self.tv_nsec.as_inner() as i64 - other.tv_nsec.as_inner() as i64;
+        }
+
+        if cmp >= 0 {
             // NOTE(eddyb) two aspects of this `if`-`else` are required for LLVM
             // to optimize it into a branchless form (see also #75545):
             //
@@ -169,7 +179,8 @@ impl Timespec {
         }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
         let mut secs = self.tv_sec.checked_add_unsigned(other.as_secs())?;
 
         // Nano calculations can't overflow because nanos are <1B which fit
@@ -179,10 +190,11 @@ impl Timespec {
             nsec -= NSEC_PER_SEC as u32;
             secs = secs.checked_add(1)?;
         }
-        Some(unsafe { Timespec::new_unchecked(secs, nsec.into()) })
+        Some(unsafe { Timespec::new_unchecked(secs, nsec as i64) })
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
         let mut secs = self.tv_sec.checked_sub_unsigned(other.as_secs())?;
 
         // Similar to above, nanos can't overflow.
@@ -191,7 +203,7 @@ impl Timespec {
             nsec += NSEC_PER_SEC as i32;
             secs = secs.checked_sub(1)?;
         }
-        Some(unsafe { Timespec::new_unchecked(secs, nsec.into()) })
+        Some(unsafe { Timespec::new_unchecked(secs, nsec as i64) })
     }
 
     #[allow(dead_code)]
diff --git a/library/std/src/sys/pal/unsupported/thread.rs b/library/std/src/sys/pal/unsupported/thread.rs
index 5a1e3fde986..34d9b5ec70c 100644
--- a/library/std/src/sys/pal/unsupported/thread.rs
+++ b/library/std/src/sys/pal/unsupported/thread.rs
@@ -39,6 +39,10 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     unsupported()
 }
diff --git a/library/std/src/sys/pal/unsupported/time.rs b/library/std/src/sys/pal/unsupported/time.rs
index 6d67b538a96..0c387917044 100644
--- a/library/std/src/sys/pal/unsupported/time.rs
+++ b/library/std/src/sys/pal/unsupported/time.rs
@@ -31,15 +31,22 @@ impl SystemTime {
         panic!("time not implemented on this platform")
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
-        self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+        // FIXME: ok_or_else with const closures
+        match self.0.checked_sub(other.0) {
+            Some(duration) => Ok(duration),
+            None => Err(other.0 - self.0),
+        }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_add(*other)?))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub(*other)?))
     }
 }
diff --git a/library/std/src/sys/pal/wasi/thread.rs b/library/std/src/sys/pal/wasi/thread.rs
index a46c74630c9..4755e2ef5da 100644
--- a/library/std/src/sys/pal/wasi/thread.rs
+++ b/library/std/src/sys/pal/wasi/thread.rs
@@ -194,6 +194,10 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     cfg_if::cfg_if! {
         if #[cfg(target_feature = "atomics")] {
diff --git a/library/std/src/sys/pal/wasi/time.rs b/library/std/src/sys/pal/wasi/time.rs
index 0d8d0b59ac1..892661b312b 100644
--- a/library/std/src/sys/pal/wasi/time.rs
+++ b/library/std/src/sys/pal/wasi/time.rs
@@ -43,23 +43,34 @@ impl SystemTime {
         SystemTime(current_time(wasi::CLOCKID_REALTIME))
     }
 
-    pub fn from_wasi_timestamp(ts: wasi::Timestamp) -> SystemTime {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn from_wasi_timestamp(ts: wasi::Timestamp) -> SystemTime {
         SystemTime(Duration::from_nanos(ts))
     }
 
-    pub fn to_wasi_timestamp(&self) -> Option<wasi::Timestamp> {
-        self.0.as_nanos().try_into().ok()
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn to_wasi_timestamp(&self) -> Option<wasi::Timestamp> {
+        // FIXME: const TryInto
+        let ns = self.0.as_nanos();
+        if ns <= u64::MAX as u128 { Some(ns as u64) } else { None }
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
-        self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+        // FIXME: ok_or_else with const closures
+        match self.0.checked_sub(other.0) {
+            Some(duration) => Ok(duration),
+            None => Err(other.0 - self.0),
+        }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_add(*other)?))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub(*other)?))
     }
 }
diff --git a/library/std/src/sys/pal/wasm/atomics/thread.rs b/library/std/src/sys/pal/wasm/atomics/thread.rs
index ebfabaafc79..42a7dbdf8b8 100644
--- a/library/std/src/sys/pal/wasm/atomics/thread.rs
+++ b/library/std/src/sys/pal/wasm/atomics/thread.rs
@@ -56,6 +56,10 @@ impl Thread {
     pub fn join(self) {}
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     unsupported()
 }
diff --git a/library/std/src/sys/pal/windows/c/bindings.txt b/library/std/src/sys/pal/windows/c/bindings.txt
index 827d96e73db..c8e4dca4781 100644
--- a/library/std/src/sys/pal/windows/c/bindings.txt
+++ b/library/std/src/sys/pal/windows/c/bindings.txt
@@ -2185,6 +2185,7 @@ GetSystemInfo
 GetSystemTimeAsFileTime
 GetSystemTimePreciseAsFileTime
 GetTempPathW
+GetThreadId
 GetUserProfileDirectoryW
 GetWindowsDirectoryW
 HANDLE
diff --git a/library/std/src/sys/pal/windows/c/windows_sys.rs b/library/std/src/sys/pal/windows/c/windows_sys.rs
index b2e3aabc633..45a273d241a 100644
--- a/library/std/src/sys/pal/windows/c/windows_sys.rs
+++ b/library/std/src/sys/pal/windows/c/windows_sys.rs
@@ -61,6 +61,7 @@ windows_targets::link!("kernel32.dll" "system" fn GetSystemInfo(lpsysteminfo : *
 windows_targets::link!("kernel32.dll" "system" fn GetSystemTimeAsFileTime(lpsystemtimeasfiletime : *mut FILETIME));
 windows_targets::link!("kernel32.dll" "system" fn GetSystemTimePreciseAsFileTime(lpsystemtimeasfiletime : *mut FILETIME));
 windows_targets::link!("kernel32.dll" "system" fn GetTempPathW(nbufferlength : u32, lpbuffer : PWSTR) -> u32);
+windows_targets::link!("kernel32.dll" "system" fn GetThreadId(thread : HANDLE) -> u32);
 windows_targets::link!("userenv.dll" "system" fn GetUserProfileDirectoryW(htoken : HANDLE, lpprofiledir : PWSTR, lpcchsize : *mut u32) -> BOOL);
 windows_targets::link!("kernel32.dll" "system" fn GetWindowsDirectoryW(lpbuffer : PWSTR, usize : u32) -> u32);
 windows_targets::link!("kernel32.dll" "system" fn InitOnceBeginInitialize(lpinitonce : *mut INIT_ONCE, dwflags : u32, fpending : *mut BOOL, lpcontext : *mut *mut core::ffi::c_void) -> BOOL);
diff --git a/library/std/src/sys/pal/windows/stack_overflow.rs b/library/std/src/sys/pal/windows/stack_overflow.rs
index 734cd30bed0..9a40551b985 100644
--- a/library/std/src/sys/pal/windows/stack_overflow.rs
+++ b/library/std/src/sys/pal/windows/stack_overflow.rs
@@ -20,7 +20,8 @@ unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POIN
         if code == c::EXCEPTION_STACK_OVERFLOW {
             thread::with_current_name(|name| {
                 let name = name.unwrap_or("<unknown>");
-                rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
+                let tid = thread::current_os_id();
+                rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
             });
         }
         c::EXCEPTION_CONTINUE_SEARCH
diff --git a/library/std/src/sys/pal/windows/thread.rs b/library/std/src/sys/pal/windows/thread.rs
index b45f76fb546..c708da5af12 100644
--- a/library/std/src/sys/pal/windows/thread.rs
+++ b/library/std/src/sys/pal/windows/thread.rs
@@ -127,6 +127,14 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    // SAFETY: FFI call with no preconditions.
+    let id: u32 = unsafe { c::GetThreadId(c::GetCurrentThread()) };
+
+    // A return value of 0 indicates failed lookup.
+    if id == 0 { None } else { Some(id.into()) }
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     let res = unsafe {
         let mut sysinfo: c::SYSTEM_INFO = crate::mem::zeroed();
diff --git a/library/std/src/sys/pal/windows/time.rs b/library/std/src/sys/pal/windows/time.rs
index 68126bd8d2f..a948c07e0a3 100644
--- a/library/std/src/sys/pal/windows/time.rs
+++ b/library/std/src/sys/pal/windows/time.rs
@@ -72,7 +72,8 @@ impl SystemTime {
         }
     }
 
-    fn from_intervals(intervals: i64) -> SystemTime {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    const fn from_intervals(intervals: i64) -> SystemTime {
         SystemTime {
             t: c::FILETIME {
                 dwLowDateTime: intervals as u32,
@@ -81,11 +82,13 @@ impl SystemTime {
         }
     }
 
-    fn intervals(&self) -> i64 {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    const fn intervals(&self) -> i64 {
         (self.t.dwLowDateTime as i64) | ((self.t.dwHighDateTime as i64) << 32)
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
         let me = self.intervals();
         let other = other.intervals();
         if me >= other {
@@ -95,12 +98,14 @@ impl SystemTime {
         }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         let intervals = self.intervals().checked_add(checked_dur2intervals(other)?)?;
         Some(SystemTime::from_intervals(intervals))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         let intervals = self.intervals().checked_sub(checked_dur2intervals(other)?)?;
         Some(SystemTime::from_intervals(intervals))
     }
@@ -150,15 +155,18 @@ impl Hash for SystemTime {
     }
 }
 
-fn checked_dur2intervals(dur: &Duration) -> Option<i64> {
-    dur.as_secs()
+#[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+const fn checked_dur2intervals(dur: &Duration) -> Option<i64> {
+    // FIXME: const TryInto
+    let secs = dur
+        .as_secs()
         .checked_mul(INTERVALS_PER_SEC)?
-        .checked_add(dur.subsec_nanos() as u64 / 100)?
-        .try_into()
-        .ok()
+        .checked_add(dur.subsec_nanos() as u64 / 100)?;
+    if secs <= i64::MAX as u64 { Some(secs.cast_signed()) } else { None }
 }
 
-fn intervals2dur(intervals: u64) -> Duration {
+#[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+const fn intervals2dur(intervals: u64) -> Duration {
     Duration::new(intervals / INTERVALS_PER_SEC, ((intervals % INTERVALS_PER_SEC) * 100) as u32)
 }
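
Aside: the hunk above swaps `try_into` for an explicit range check because `TryInto` is not yet usable in `const fn`. For readers unfamiliar with the unit, a Windows `FILETIME` counts 100-nanosecond intervals; below is a minimal standalone sketch of the same conversion, assuming the conventional value of `INTERVALS_PER_SEC` (the real constant is defined elsewhere in `time.rs`).

```rust
// Standalone sketch of the Duration <-> FILETIME-interval conversion above.
// One interval = 100 ns, so one second is assumed to be 10_000_000 intervals.
use std::time::Duration;

const INTERVALS_PER_SEC: u64 = 10_000_000;

fn checked_dur2intervals(dur: &Duration) -> Option<i64> {
    let intervals = dur
        .as_secs()
        .checked_mul(INTERVALS_PER_SEC)?
        .checked_add(dur.subsec_nanos() as u64 / 100)?;
    // Same bound check the const version performs instead of `try_into`.
    if intervals <= i64::MAX as u64 { Some(intervals as i64) } else { None }
}

fn intervals2dur(intervals: u64) -> Duration {
    Duration::new(intervals / INTERVALS_PER_SEC, ((intervals % INTERVALS_PER_SEC) * 100) as u32)
}

fn main() {
    let d = Duration::new(2, 350);
    let i = checked_dur2intervals(&d).unwrap();
    assert_eq!(i, 20_000_003); // 2 s plus three whole 100 ns intervals
    assert_eq!(intervals2dur(i as u64), Duration::new(2, 300)); // sub-interval nanos are dropped
}
```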
 
diff --git a/library/std/src/sys/pal/xous/thread.rs b/library/std/src/sys/pal/xous/thread.rs
index f2404a62abf..92803c94c6e 100644
--- a/library/std/src/sys/pal/xous/thread.rs
+++ b/library/std/src/sys/pal/xous/thread.rs
@@ -145,6 +145,10 @@ impl Thread {
     }
 }
 
+pub(crate) fn current_os_id() -> Option<u64> {
+    None
+}
+
 pub fn available_parallelism() -> io::Result<NonZero<usize>> {
     // We're unicore right now.
     Ok(unsafe { NonZero::new_unchecked(1) })
diff --git a/library/std/src/sys/pal/xous/time.rs b/library/std/src/sys/pal/xous/time.rs
index ae8be81c0b7..d737416436e 100644
--- a/library/std/src/sys/pal/xous/time.rs
+++ b/library/std/src/sys/pal/xous/time.rs
@@ -43,15 +43,22 @@ impl SystemTime {
         SystemTime { 0: Duration::from_millis((upper as u64) << 32 | lower as u64) }
     }
 
-    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
-        self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+        // FIXME: ok_or_else with const closures
+        match self.0.checked_sub(other.0) {
+            Some(duration) => Ok(duration),
+            None => Err(other.0 - self.0),
+        }
     }
 
-    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_add(*other)?))
     }
 
-    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
         Some(SystemTime(self.0.checked_sub(*other)?))
     }
 }
diff --git a/library/std/src/sys/process/windows.rs b/library/std/src/sys/process/windows.rs
index 1ee3fbd285f..f9e15b82475 100644
--- a/library/std/src/sys/process/windows.rs
+++ b/library/std/src/sys/process/windows.rs
@@ -623,16 +623,10 @@ impl Stdio {
             // permissions as well as the ability to be inherited to child
             // processes (as this is about to be inherited).
             Stdio::Null => {
-                let size = size_of::<c::SECURITY_ATTRIBUTES>();
-                let mut sa = c::SECURITY_ATTRIBUTES {
-                    nLength: size as u32,
-                    lpSecurityDescriptor: ptr::null_mut(),
-                    bInheritHandle: 1,
-                };
                 let mut opts = OpenOptions::new();
                 opts.read(stdio_id == c::STD_INPUT_HANDLE);
                 opts.write(stdio_id != c::STD_INPUT_HANDLE);
-                opts.security_attributes(&mut sa);
+                opts.inherit_handle(true);
                 File::open(Path::new(r"\\.\NUL"), &opts).map(|file| file.into_inner())
             }
         }
diff --git a/library/std/src/thread/current.rs b/library/std/src/thread/current.rs
index 414711298f0..5c879903526 100644
--- a/library/std/src/thread/current.rs
+++ b/library/std/src/thread/current.rs
@@ -1,4 +1,4 @@
-use super::{Thread, ThreadId};
+use super::{Thread, ThreadId, imp};
 use crate::mem::ManuallyDrop;
 use crate::ptr;
 use crate::sys::thread_local::local_pointer;
@@ -148,6 +148,17 @@ pub(crate) fn current_id() -> ThreadId {
     id::get_or_init()
 }
 
+/// Gets the OS thread ID of the thread that invokes it, if available. If not, returns the Rust
+/// thread ID.
+///
+/// We use a `u64` to fit all possible platform IDs without excess `cfg`; most use `int`, some use a
+/// pointer, and Apple uses `uint64_t`. This is a "best effort" approach for diagnostics and is
+/// allowed to fall back to a non-OS ID (such as the Rust thread ID) or a non-unique ID (such as a
+/// PID) if the thread ID cannot be retrieved.
+pub(crate) fn current_os_id() -> u64 {
+    imp::current_os_id().unwrap_or_else(|| current_id().as_u64().get())
+}
+
 /// Gets a reference to the handle of the thread that invokes it, if the handle
 /// has been initialized.
 pub(super) fn try_with_current<F, R>(f: F) -> R
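
Aside: the new `current_os_id` wrapper is a best-effort lookup: ask the platform layer first and fall back to the Rust `ThreadId` when no OS ID is available (as on the wasm and xous targets above). A compilable sketch of that shape, using hypothetical stand-ins for `imp::current_os_id` and `current_id`:

```rust
// Platform hook stand-in: targets without a usable OS thread ID return None,
// mirroring the wasm/xous stubs added in this patch.
fn os_thread_id() -> Option<u64> {
    None
}

// Stand-in for `current_id().as_u64().get()`, the Rust-assigned thread ID.
fn rust_thread_id() -> u64 {
    42
}

// Best-effort diagnostic ID: prefer the OS value, otherwise the Rust one.
fn current_os_id() -> u64 {
    os_thread_id().unwrap_or_else(rust_thread_id)
}

fn main() {
    // With no OS ID available, the Rust ID is reported instead.
    assert_eq!(current_os_id(), 42);
}
```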
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 8d3ab82ace1..292323d0118 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -183,7 +183,7 @@ mod current;
 
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use current::current;
-pub(crate) use current::{current_id, current_or_unnamed, drop_current};
+pub(crate) use current::{current_id, current_or_unnamed, current_os_id, drop_current};
 use current::{set_current, try_with_current};
 
 mod spawnhook;
diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs
index 59ec48a57d1..ae889f1e778 100644
--- a/library/std/src/thread/tests.rs
+++ b/library/std/src/thread/tests.rs
@@ -347,6 +347,13 @@ fn test_thread_id_not_equal() {
 }
 
 #[test]
+fn test_thread_os_id_not_equal() {
+    let spawned_id = thread::spawn(|| thread::current_os_id()).join().unwrap();
+    let current_id = thread::current_os_id();
+    assert!(current_id != spawned_id);
+}
+
+#[test]
 fn test_scoped_threads_drop_result_before_join() {
     let actually_finished = &AtomicBool::new(false);
     struct X<'scope, 'env>(&'scope Scope<'scope, 'env>, &'env AtomicBool);
diff --git a/library/std/src/time.rs b/library/std/src/time.rs
index cd0683f44c9..07bb41f1496 100644
--- a/library/std/src/time.rs
+++ b/library/std/src/time.rs
@@ -551,8 +551,13 @@ impl SystemTime {
     /// println!("{difference:?}");
     /// ```
     #[stable(feature = "time2", since = "1.8.0")]
-    pub fn duration_since(&self, earlier: SystemTime) -> Result<Duration, SystemTimeError> {
-        self.0.sub_time(&earlier.0).map_err(SystemTimeError)
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn duration_since(&self, earlier: SystemTime) -> Result<Duration, SystemTimeError> {
+        // FIXME: map_err in const
+        match self.0.sub_time(&earlier.0) {
+            Ok(time) => Ok(time),
+            Err(err) => Err(SystemTimeError(err)),
+        }
     }
 
     /// Returns the difference from this system time to the
@@ -589,7 +594,8 @@ impl SystemTime {
     /// `SystemTime` (which means it's inside the bounds of the underlying data structure), `None`
     /// otherwise.
     #[stable(feature = "time_checked_add", since = "1.34.0")]
-    pub fn checked_add(&self, duration: Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_add(&self, duration: Duration) -> Option<SystemTime> {
         self.0.checked_add_duration(&duration).map(SystemTime)
     }
 
@@ -597,13 +603,15 @@ impl SystemTime {
     /// `SystemTime` (which means it's inside the bounds of the underlying data structure), `None`
     /// otherwise.
     #[stable(feature = "time_checked_add", since = "1.34.0")]
-    pub fn checked_sub(&self, duration: Duration) -> Option<SystemTime> {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn checked_sub(&self, duration: Duration) -> Option<SystemTime> {
         self.0.checked_sub_duration(&duration).map(SystemTime)
     }
 }
 
 #[stable(feature = "time2", since = "1.8.0")]
-impl Add<Duration> for SystemTime {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Add<Duration> for SystemTime {
     type Output = SystemTime;
 
     /// # Panics
@@ -616,14 +624,16 @@ impl Add<Duration> for SystemTime {
 }
 
 #[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl AddAssign<Duration> for SystemTime {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const AddAssign<Duration> for SystemTime {
     fn add_assign(&mut self, other: Duration) {
         *self = *self + other;
     }
 }
 
 #[stable(feature = "time2", since = "1.8.0")]
-impl Sub<Duration> for SystemTime {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const Sub<Duration> for SystemTime {
     type Output = SystemTime;
 
     fn sub(self, dur: Duration) -> SystemTime {
@@ -632,7 +642,8 @@ impl Sub<Duration> for SystemTime {
 }
 
 #[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl SubAssign<Duration> for SystemTime {
+#[rustc_const_unstable(feature = "const_ops", issue = "143802")]
+impl const SubAssign<Duration> for SystemTime {
     fn sub_assign(&mut self, other: Duration) {
         *self = *self - other;
     }
@@ -699,7 +710,8 @@ impl SystemTimeError {
     /// ```
     #[must_use]
     #[stable(feature = "time2", since = "1.8.0")]
-    pub fn duration(&self) -> Duration {
+    #[rustc_const_unstable(feature = "const_system_time", issue = "144517")]
+    pub const fn duration(&self) -> Duration {
         self.0
     }
 }
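
Aside: with these methods now `const fn`, a nightly toolchain carrying this patch could evaluate `SystemTime` arithmetic at compile time. A minimal sketch assuming the unstable `const_system_time` feature gate introduced above:

```rust
#![feature(const_system_time)]

use std::time::{Duration, SystemTime};

// Evaluated at compile time: `UNIX_EPOCH` is an associated const, and
// `checked_add` is a `const fn` under the feature gate from this patch.
const ONE_HOUR_IN: Option<SystemTime> =
    SystemTime::UNIX_EPOCH.checked_add(Duration::from_secs(3600));

fn main() {
    let t = ONE_HOUR_IN.expect("adding one hour to the epoch should not overflow");
    assert_eq!(
        t.duration_since(SystemTime::UNIX_EPOCH).unwrap(),
        Duration::from_secs(3600)
    );
}
```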
diff --git a/library/std/tests/env.rs b/library/std/tests/env.rs
index e754cf8263b..b53fd69b707 100644
--- a/library/std/tests/env.rs
+++ b/library/std/tests/env.rs
@@ -16,7 +16,7 @@ fn test_self_exe_path() {
 
 #[test]
 fn test() {
-    assert!((!Path::new("test-path").is_absolute()));
+    assert!(!Path::new("test-path").is_absolute());
 
     #[cfg(not(target_env = "sgx"))]
     current_dir().unwrap();
diff --git a/library/std/tests/path.rs b/library/std/tests/path.rs
index 901d2770f20..e1576a0d423 100644
--- a/library/std/tests/path.rs
+++ b/library/std/tests/path.rs
@@ -1,10 +1,4 @@
-#![feature(
-    clone_to_uninit,
-    path_add_extension,
-    path_file_prefix,
-    maybe_uninit_slice,
-    normalize_lexically
-)]
+#![feature(clone_to_uninit, path_add_extension, maybe_uninit_slice, normalize_lexically)]
 
 use std::clone::CloneToUninit;
 use std::ffi::OsStr;
diff --git a/library/stdarch/.github/workflows/rustc-pull.yml b/library/stdarch/.github/workflows/rustc-pull.yml
index 6b90d8a500f..1379bd06b0e 100644
--- a/library/stdarch/.github/workflows/rustc-pull.yml
+++ b/library/stdarch/.github/workflows/rustc-pull.yml
@@ -12,6 +12,7 @@ jobs:
     if: github.repository == 'rust-lang/stdarch'
     uses: rust-lang/josh-sync/.github/workflows/rustc-pull.yml@main
     with:
+      github-app-id: ${{ vars.APP_CLIENT_ID }}
       # https://rust-lang.zulipchat.com/#narrow/channel/208962-t-libs.2Fstdarch/topic/Subtree.20sync.20automation/with/528461782
       zulip-stream-id: 208962
       zulip-bot-email:  "stdarch-ci-bot@rust-lang.zulipchat.com"
@@ -19,4 +20,4 @@ jobs:
       branch-name: rustc-pull
     secrets:
       zulip-api-token: ${{ secrets.ZULIP_API_TOKEN }}
-      token: ${{ secrets.GITHUB_TOKEN }}
+      github-app-secret: ${{ secrets.APP_PRIVATE_KEY }}
diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock
index 21ce304db0d..9df0791b865 100644
--- a/library/stdarch/Cargo.lock
+++ b/library/stdarch/Cargo.lock
@@ -73,7 +73,7 @@ version = "0.1.0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.104",
+ "syn",
 ]
 
 [[package]]
@@ -90,9 +90,9 @@ checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
 
 [[package]]
 name = "cc"
-version = "1.2.30"
+version = "1.2.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7"
+checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2"
 dependencies = [
  "shlex",
 ]
@@ -105,9 +105,9 @@ checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
 
 [[package]]
 name = "clap"
-version = "4.5.41"
+version = "4.5.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
+checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -115,14 +115,14 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.41"
+version = "4.5.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
+checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966"
 dependencies = [
  "anstream",
  "anstyle",
  "clap_lex",
- "strsim 0.11.1",
+ "strsim",
 ]
 
 [[package]]
@@ -134,7 +134,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.104",
+ "syn",
 ]
 
 [[package]]
@@ -183,31 +183,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
 
 [[package]]
-name = "csv"
-version = "1.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
-dependencies = [
- "csv-core",
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "csv-core"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d"
-dependencies = [
- "memchr",
-]
-
-[[package]]
 name = "darling"
-version = "0.13.4"
+version = "0.20.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
+checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
 dependencies = [
  "darling_core",
  "darling_macro",
@@ -215,27 +194,27 @@ dependencies = [
 
 [[package]]
 name = "darling_core"
-version = "0.13.4"
+version = "0.20.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
+checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
 dependencies = [
  "fnv",
  "ident_case",
  "proc-macro2",
  "quote",
- "strsim 0.10.0",
- "syn 1.0.109",
+ "strsim",
+ "syn",
 ]
 
 [[package]]
 name = "darling_macro"
-version = "0.13.4"
+version = "0.20.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
 dependencies = [
  "darling_core",
  "quote",
- "syn 1.0.109",
+ "syn",
 ]
 
 [[package]]
@@ -357,14 +336,11 @@ name = "intrinsic-test"
 version = "0.1.0"
 dependencies = [
  "clap",
- "csv",
  "diff",
  "itertools",
- "lazy_static",
  "log",
  "pretty_env_logger",
  "rayon",
- "regex",
  "serde",
  "serde_json",
 ]
@@ -402,12 +378,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
 
 [[package]]
-name = "lazy_static"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
-
-[[package]]
 name = "libc"
 version = "0.2.174"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -576,9 +546,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
 name = "rustc-demangle"
-version = "0.1.25"
+version = "0.1.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f"
+checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
 
 [[package]]
 name = "ryu"
@@ -618,14 +588,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.104",
+ "syn",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.140"
+version = "1.0.142"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
+checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
 dependencies = [
  "itoa",
  "memchr",
@@ -635,24 +605,25 @@ dependencies = [
 
 [[package]]
 name = "serde_with"
-version = "1.14.0"
+version = "3.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff"
+checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
 dependencies = [
  "serde",
+ "serde_derive",
  "serde_with_macros",
 ]
 
 [[package]]
 name = "serde_with_macros"
-version = "1.5.2"
+version = "3.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082"
+checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
 dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn",
 ]
 
 [[package]]
@@ -679,7 +650,7 @@ version = "0.1.0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.104",
+ "syn",
 ]
 
 [[package]]
@@ -724,7 +695,7 @@ dependencies = [
  "quote",
  "serde",
  "serde_json",
- "syn 2.0.104",
+ "syn",
 ]
 
 [[package]]
@@ -738,29 +709,12 @@ dependencies = [
 
 [[package]]
 name = "strsim"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
-
-[[package]]
-name = "strsim"
 version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
 [[package]]
 name = "syn"
-version = "1.0.109"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "syn"
 version = "2.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
@@ -943,5 +897,5 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.104",
+ "syn",
 ]
diff --git a/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile b/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile
index 5ab3431ba27..b5c6874ca52 100644
--- a/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile
+++ b/library/stdarch/ci/docker/loongarch64-unknown-linux-gnu/Dockerfile
@@ -6,7 +6,7 @@ RUN apt-get update && \
     gcc-loongarch64-linux-gnu libc6-dev-loong64-cross
 
 
-ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc-14 \
+ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc \
     CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-loongarch64-static -cpu max -L /usr/loongarch64-linux-gnu" \
     OBJDUMP=loongarch64-linux-gnu-objdump \
     STDARCH_TEST_SKIP_FEATURE=frecipe
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs
index 4361acdc1fc..cda0ebec677 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs
@@ -6,7058 +6,7059 @@
 // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lasx.spec
 // ```
 
+use crate::mem::transmute;
 use super::types::*;
 
 #[allow(improper_ctypes)]
 unsafe extern "unadjusted" {
     #[link_name = "llvm.loongarch.lasx.xvsll.b"]
-    fn __lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsll_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsll.h"]
-    fn __lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsll_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsll.w"]
-    fn __lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsll_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsll.d"]
-    fn __lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsll_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslli.b"]
-    fn __lasx_xvslli_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvslli_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslli.h"]
-    fn __lasx_xvslli_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvslli_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslli.w"]
-    fn __lasx_xvslli_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvslli_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslli.d"]
-    fn __lasx_xvslli_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvslli_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsra.b"]
-    fn __lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsra_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsra.h"]
-    fn __lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsra_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsra.w"]
-    fn __lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsra_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsra.d"]
-    fn __lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsra_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrai.b"]
-    fn __lasx_xvsrai_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrai_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrai.h"]
-    fn __lasx_xvsrai_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrai_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrai.w"]
-    fn __lasx_xvsrai_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrai_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrai.d"]
-    fn __lasx_xvsrai_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrai_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrar.b"]
-    fn __lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsrar_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrar.h"]
-    fn __lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsrar_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrar.w"]
-    fn __lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsrar_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrar.d"]
-    fn __lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsrar_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrari.b"]
-    fn __lasx_xvsrari_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrari_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrari.h"]
-    fn __lasx_xvsrari_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrari_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrari.w"]
-    fn __lasx_xvsrari_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrari_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrari.d"]
-    fn __lasx_xvsrari_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrari_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrl.b"]
-    fn __lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsrl_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrl.h"]
-    fn __lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsrl_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrl.w"]
-    fn __lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsrl_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrl.d"]
-    fn __lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsrl_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrli.b"]
-    fn __lasx_xvsrli_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrli_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrli.h"]
-    fn __lasx_xvsrli_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrli_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrli.w"]
-    fn __lasx_xvsrli_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrli_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrli.d"]
-    fn __lasx_xvsrli_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrli_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.b"]
-    fn __lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsrlr_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.h"]
-    fn __lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsrlr_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.w"]
-    fn __lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsrlr_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlr.d"]
-    fn __lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsrlr_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.b"]
-    fn __lasx_xvsrlri_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsrlri_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.h"]
-    fn __lasx_xvsrlri_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsrlri_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.w"]
-    fn __lasx_xvsrlri_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsrlri_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlri.d"]
-    fn __lasx_xvsrlri_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsrlri_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.b"]
-    fn __lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvbitclr_b(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.h"]
-    fn __lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvbitclr_h(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.w"]
-    fn __lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvbitclr_w(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitclr.d"]
-    fn __lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvbitclr_d(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.b"]
-    fn __lasx_xvbitclri_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvbitclri_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.h"]
-    fn __lasx_xvbitclri_h(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvbitclri_h(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.w"]
-    fn __lasx_xvbitclri_w(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvbitclri_w(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitclri.d"]
-    fn __lasx_xvbitclri_d(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvbitclri_d(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitset.b"]
-    fn __lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvbitset_b(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitset.h"]
-    fn __lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvbitset_h(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitset.w"]
-    fn __lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvbitset_w(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitset.d"]
-    fn __lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvbitset_d(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.b"]
-    fn __lasx_xvbitseti_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvbitseti_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.h"]
-    fn __lasx_xvbitseti_h(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvbitseti_h(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.w"]
-    fn __lasx_xvbitseti_w(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvbitseti_w(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitseti.d"]
-    fn __lasx_xvbitseti_d(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvbitseti_d(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.b"]
-    fn __lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvbitrev_b(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.h"]
-    fn __lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvbitrev_h(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.w"]
-    fn __lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvbitrev_w(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitrev.d"]
-    fn __lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvbitrev_d(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.b"]
-    fn __lasx_xvbitrevi_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvbitrevi_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.h"]
-    fn __lasx_xvbitrevi_h(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvbitrevi_h(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.w"]
-    fn __lasx_xvbitrevi_w(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvbitrevi_w(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvbitrevi.d"]
-    fn __lasx_xvbitrevi_d(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvbitrevi_d(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvadd.b"]
-    fn __lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvadd_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvadd.h"]
-    fn __lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvadd_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvadd.w"]
-    fn __lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvadd_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvadd.d"]
-    fn __lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvadd_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddi.bu"]
-    fn __lasx_xvaddi_bu(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvaddi_bu(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvaddi.hu"]
-    fn __lasx_xvaddi_hu(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvaddi_hu(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddi.wu"]
-    fn __lasx_xvaddi_wu(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvaddi_wu(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddi.du"]
-    fn __lasx_xvaddi_du(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvaddi_du(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsub.b"]
-    fn __lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsub_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsub.h"]
-    fn __lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsub_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsub.w"]
-    fn __lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsub_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsub.d"]
-    fn __lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsub_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubi.bu"]
-    fn __lasx_xvsubi_bu(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsubi_bu(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsubi.hu"]
-    fn __lasx_xvsubi_hu(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsubi_hu(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubi.wu"]
-    fn __lasx_xvsubi_wu(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsubi_wu(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubi.du"]
-    fn __lasx_xvsubi_du(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsubi_du(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmax.b"]
-    fn __lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmax_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmax.h"]
-    fn __lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmax_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmax.w"]
-    fn __lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmax_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmax.d"]
-    fn __lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmax_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.b"]
-    fn __lasx_xvmaxi_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvmaxi_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.h"]
-    fn __lasx_xvmaxi_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvmaxi_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.w"]
-    fn __lasx_xvmaxi_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvmaxi_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.d"]
-    fn __lasx_xvmaxi_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvmaxi_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmax.bu"]
-    fn __lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmax_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmax.hu"]
-    fn __lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmax_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmax.wu"]
-    fn __lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmax_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmax.du"]
-    fn __lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmax_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.bu"]
-    fn __lasx_xvmaxi_bu(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvmaxi_bu(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.hu"]
-    fn __lasx_xvmaxi_hu(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvmaxi_hu(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.wu"]
-    fn __lasx_xvmaxi_wu(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvmaxi_wu(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmaxi.du"]
-    fn __lasx_xvmaxi_du(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvmaxi_du(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmin.b"]
-    fn __lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmin_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmin.h"]
-    fn __lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmin_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmin.w"]
-    fn __lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmin_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmin.d"]
-    fn __lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmin_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmini.b"]
-    fn __lasx_xvmini_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvmini_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmini.h"]
-    fn __lasx_xvmini_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvmini_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmini.w"]
-    fn __lasx_xvmini_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvmini_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmini.d"]
-    fn __lasx_xvmini_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvmini_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmin.bu"]
-    fn __lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmin_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmin.hu"]
-    fn __lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmin_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmin.wu"]
-    fn __lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmin_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmin.du"]
-    fn __lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmin_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmini.bu"]
-    fn __lasx_xvmini_bu(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvmini_bu(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmini.hu"]
-    fn __lasx_xvmini_hu(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvmini_hu(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmini.wu"]
-    fn __lasx_xvmini_wu(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvmini_wu(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmini.du"]
-    fn __lasx_xvmini_du(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvmini_du(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvseq.b"]
-    fn __lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvseq_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvseq.h"]
-    fn __lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvseq_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvseq.w"]
-    fn __lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvseq_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvseq.d"]
-    fn __lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvseq_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvseqi.b"]
-    fn __lasx_xvseqi_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvseqi_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvseqi.h"]
-    fn __lasx_xvseqi_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvseqi_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvseqi.w"]
-    fn __lasx_xvseqi_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvseqi_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvseqi.d"]
-    fn __lasx_xvseqi_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvseqi_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslt.b"]
-    fn __lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvslt_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslt.h"]
-    fn __lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvslt_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslt.w"]
-    fn __lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvslt_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslt.d"]
-    fn __lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvslt_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslti.b"]
-    fn __lasx_xvslti_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvslti_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslti.h"]
-    fn __lasx_xvslti_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvslti_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslti.w"]
-    fn __lasx_xvslti_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvslti_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslti.d"]
-    fn __lasx_xvslti_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvslti_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslt.bu"]
-    fn __lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8;
+    fn __lasx_xvslt_bu(a: __v32u8, b: __v32u8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslt.hu"]
-    fn __lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16;
+    fn __lasx_xvslt_hu(a: __v16u16, b: __v16u16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslt.wu"]
-    fn __lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32;
+    fn __lasx_xvslt_wu(a: __v8u32, b: __v8u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslt.du"]
-    fn __lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvslt_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslti.bu"]
-    fn __lasx_xvslti_bu(a: v32u8, b: u32) -> v32i8;
+    fn __lasx_xvslti_bu(a: __v32u8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslti.hu"]
-    fn __lasx_xvslti_hu(a: v16u16, b: u32) -> v16i16;
+    fn __lasx_xvslti_hu(a: __v16u16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslti.wu"]
-    fn __lasx_xvslti_wu(a: v8u32, b: u32) -> v8i32;
+    fn __lasx_xvslti_wu(a: __v8u32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslti.du"]
-    fn __lasx_xvslti_du(a: v4u64, b: u32) -> v4i64;
+    fn __lasx_xvslti_du(a: __v4u64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsle.b"]
-    fn __lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsle_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsle.h"]
-    fn __lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsle_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsle.w"]
-    fn __lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsle_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsle.d"]
-    fn __lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsle_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslei.b"]
-    fn __lasx_xvslei_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvslei_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslei.h"]
-    fn __lasx_xvslei_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvslei_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslei.w"]
-    fn __lasx_xvslei_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvslei_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslei.d"]
-    fn __lasx_xvslei_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvslei_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsle.bu"]
-    fn __lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8;
+    fn __lasx_xvsle_bu(a: __v32u8, b: __v32u8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsle.hu"]
-    fn __lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16;
+    fn __lasx_xvsle_hu(a: __v16u16, b: __v16u16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsle.wu"]
-    fn __lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32;
+    fn __lasx_xvsle_wu(a: __v8u32, b: __v8u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsle.du"]
-    fn __lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvsle_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvslei.bu"]
-    fn __lasx_xvslei_bu(a: v32u8, b: u32) -> v32i8;
+    fn __lasx_xvslei_bu(a: __v32u8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvslei.hu"]
-    fn __lasx_xvslei_hu(a: v16u16, b: u32) -> v16i16;
+    fn __lasx_xvslei_hu(a: __v16u16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvslei.wu"]
-    fn __lasx_xvslei_wu(a: v8u32, b: u32) -> v8i32;
+    fn __lasx_xvslei_wu(a: __v8u32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvslei.du"]
-    fn __lasx_xvslei_du(a: v4u64, b: u32) -> v4i64;
+    fn __lasx_xvslei_du(a: __v4u64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsat.b"]
-    fn __lasx_xvsat_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvsat_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsat.h"]
-    fn __lasx_xvsat_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvsat_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsat.w"]
-    fn __lasx_xvsat_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvsat_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsat.d"]
-    fn __lasx_xvsat_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvsat_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsat.bu"]
-    fn __lasx_xvsat_bu(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvsat_bu(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvsat.hu"]
-    fn __lasx_xvsat_hu(a: v16u16, b: u32) -> v16u16;
+    fn __lasx_xvsat_hu(a: __v16u16, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvsat.wu"]
-    fn __lasx_xvsat_wu(a: v8u32, b: u32) -> v8u32;
+    fn __lasx_xvsat_wu(a: __v8u32, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsat.du"]
-    fn __lasx_xvsat_du(a: v4u64, b: u32) -> v4u64;
+    fn __lasx_xvsat_du(a: __v4u64, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvadda.b"]
-    fn __lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvadda_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvadda.h"]
-    fn __lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvadda_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvadda.w"]
-    fn __lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvadda_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvadda.d"]
-    fn __lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvadda_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsadd.b"]
-    fn __lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsadd_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsadd.h"]
-    fn __lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsadd_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsadd.w"]
-    fn __lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsadd_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsadd.d"]
-    fn __lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsadd_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsadd.bu"]
-    fn __lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvsadd_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvsadd.hu"]
-    fn __lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvsadd_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvsadd.wu"]
-    fn __lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvsadd_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsadd.du"]
-    fn __lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvsadd_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvavg.b"]
-    fn __lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvavg_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvavg.h"]
-    fn __lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvavg_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvavg.w"]
-    fn __lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvavg_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvavg.d"]
-    fn __lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvavg_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvavg.bu"]
-    fn __lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvavg_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvavg.hu"]
-    fn __lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvavg_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvavg.wu"]
-    fn __lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvavg_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvavg.du"]
-    fn __lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvavg_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvavgr.b"]
-    fn __lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvavgr_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvavgr.h"]
-    fn __lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvavgr_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvavgr.w"]
-    fn __lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvavgr_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvavgr.d"]
-    fn __lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvavgr_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvavgr.bu"]
-    fn __lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvavgr_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvavgr.hu"]
-    fn __lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvavgr_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvavgr.wu"]
-    fn __lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvavgr_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvavgr.du"]
-    fn __lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvavgr_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvssub.b"]
-    fn __lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvssub_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssub.h"]
-    fn __lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvssub_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssub.w"]
-    fn __lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvssub_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssub.d"]
-    fn __lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvssub_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssub.bu"]
-    fn __lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvssub_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssub.hu"]
-    fn __lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvssub_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssub.wu"]
-    fn __lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvssub_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssub.du"]
-    fn __lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvssub_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvabsd.b"]
-    fn __lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvabsd_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvabsd.h"]
-    fn __lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvabsd_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvabsd.w"]
-    fn __lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvabsd_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvabsd.d"]
-    fn __lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvabsd_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvabsd.bu"]
-    fn __lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvabsd_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvabsd.hu"]
-    fn __lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvabsd_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvabsd.wu"]
-    fn __lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvabsd_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvabsd.du"]
-    fn __lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvabsd_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmul.b"]
-    fn __lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmul_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmul.h"]
-    fn __lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmul_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmul.w"]
-    fn __lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmul_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmul.d"]
-    fn __lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmul_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmadd.b"]
-    fn __lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvmadd_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmadd.h"]
-    fn __lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvmadd_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmadd.w"]
-    fn __lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32;
+    fn __lasx_xvmadd_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmadd.d"]
-    fn __lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmadd_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmsub.b"]
-    fn __lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvmsub_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmsub.h"]
-    fn __lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvmsub_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmsub.w"]
-    fn __lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32;
+    fn __lasx_xvmsub_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmsub.d"]
-    fn __lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmsub_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvdiv.b"]
-    fn __lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvdiv_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvdiv.h"]
-    fn __lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvdiv_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvdiv.w"]
-    fn __lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvdiv_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvdiv.d"]
-    fn __lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvdiv_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvdiv.bu"]
-    fn __lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvdiv_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvdiv.hu"]
-    fn __lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvdiv_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvdiv.wu"]
-    fn __lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvdiv_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvdiv.du"]
-    fn __lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvdiv_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.h.b"]
-    fn __lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvhaddw_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.w.h"]
-    fn __lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvhaddw_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.d.w"]
-    fn __lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvhaddw_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.hu.bu"]
-    fn __lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16;
+    fn __lasx_xvhaddw_hu_bu(a: __v32u8, b: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.wu.hu"]
-    fn __lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32;
+    fn __lasx_xvhaddw_wu_hu(a: __v16u16, b: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.du.wu"]
-    fn __lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64;
+    fn __lasx_xvhaddw_du_wu(a: __v8u32, b: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.h.b"]
-    fn __lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvhsubw_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.w.h"]
-    fn __lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvhsubw_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.d.w"]
-    fn __lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvhsubw_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.hu.bu"]
-    fn __lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvhsubw_hu_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.wu.hu"]
-    fn __lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvhsubw_wu_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.du.wu"]
-    fn __lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvhsubw_du_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmod.b"]
-    fn __lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmod_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmod.h"]
-    fn __lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmod_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmod.w"]
-    fn __lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmod_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmod.d"]
-    fn __lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmod_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmod.bu"]
-    fn __lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmod_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmod.hu"]
-    fn __lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmod_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmod.wu"]
-    fn __lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmod_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmod.du"]
-    fn __lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmod_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.b"]
-    fn __lasx_xvrepl128vei_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvrepl128vei_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.h"]
-    fn __lasx_xvrepl128vei_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvrepl128vei_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.w"]
-    fn __lasx_xvrepl128vei_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvrepl128vei_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvrepl128vei.d"]
-    fn __lasx_xvrepl128vei_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvrepl128vei_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickev.b"]
-    fn __lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpickev_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpickev.h"]
-    fn __lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpickev_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpickev.w"]
-    fn __lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpickev_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickev.d"]
-    fn __lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpickev_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickod.b"]
-    fn __lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpickod_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpickod.h"]
-    fn __lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpickod_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpickod.w"]
-    fn __lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpickod_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickod.d"]
-    fn __lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpickod_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvilvh.b"]
-    fn __lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvilvh_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvilvh.h"]
-    fn __lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvilvh_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvilvh.w"]
-    fn __lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvilvh_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvilvh.d"]
-    fn __lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvilvh_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvilvl.b"]
-    fn __lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvilvl_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvilvl.h"]
-    fn __lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvilvl_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvilvl.w"]
-    fn __lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvilvl_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvilvl.d"]
-    fn __lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvilvl_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpackev.b"]
-    fn __lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpackev_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpackev.h"]
-    fn __lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpackev_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpackev.w"]
-    fn __lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpackev_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpackev.d"]
-    fn __lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpackev_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpackod.b"]
-    fn __lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvpackod_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpackod.h"]
-    fn __lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvpackod_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpackod.w"]
-    fn __lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvpackod_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpackod.d"]
-    fn __lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvpackod_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvshuf.b"]
-    fn __lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvshuf_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvshuf.h"]
-    fn __lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvshuf_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvshuf.w"]
-    fn __lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32;
+    fn __lasx_xvshuf_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvshuf.d"]
-    fn __lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvshuf_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvand.v"]
-    fn __lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvand_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvandi.b"]
-    fn __lasx_xvandi_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvandi_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvor.v"]
-    fn __lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvor_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvori.b"]
-    fn __lasx_xvori_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvori_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvnor.v"]
-    fn __lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvnor_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvnori.b"]
-    fn __lasx_xvnori_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvnori_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvxor.v"]
-    fn __lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvxor_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvxori.b"]
-    fn __lasx_xvxori_b(a: v32u8, b: u32) -> v32u8;
+    fn __lasx_xvxori_b(a: __v32u8, b: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitsel.v"]
-    fn __lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8;
+    fn __lasx_xvbitsel_v(a: __v32u8, b: __v32u8, c: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvbitseli.b"]
-    fn __lasx_xvbitseli_b(a: v32u8, b: v32u8, c: u32) -> v32u8;
+    fn __lasx_xvbitseli_b(a: __v32u8, b: __v32u8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.b"]
-    fn __lasx_xvshuf4i_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvshuf4i_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.h"]
-    fn __lasx_xvshuf4i_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvshuf4i_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.w"]
-    fn __lasx_xvshuf4i_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvshuf4i_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.b"]
-    fn __lasx_xvreplgr2vr_b(a: i32) -> v32i8;
+    fn __lasx_xvreplgr2vr_b(a: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.h"]
-    fn __lasx_xvreplgr2vr_h(a: i32) -> v16i16;
+    fn __lasx_xvreplgr2vr_h(a: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.w"]
-    fn __lasx_xvreplgr2vr_w(a: i32) -> v8i32;
+    fn __lasx_xvreplgr2vr_w(a: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.d"]
-    fn __lasx_xvreplgr2vr_d(a: i64) -> v4i64;
+    fn __lasx_xvreplgr2vr_d(a: i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.b"]
-    fn __lasx_xvpcnt_b(a: v32i8) -> v32i8;
+    fn __lasx_xvpcnt_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.h"]
-    fn __lasx_xvpcnt_h(a: v16i16) -> v16i16;
+    fn __lasx_xvpcnt_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.w"]
-    fn __lasx_xvpcnt_w(a: v8i32) -> v8i32;
+    fn __lasx_xvpcnt_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpcnt.d"]
-    fn __lasx_xvpcnt_d(a: v4i64) -> v4i64;
+    fn __lasx_xvpcnt_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvclo.b"]
-    fn __lasx_xvclo_b(a: v32i8) -> v32i8;
+    fn __lasx_xvclo_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvclo.h"]
-    fn __lasx_xvclo_h(a: v16i16) -> v16i16;
+    fn __lasx_xvclo_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvclo.w"]
-    fn __lasx_xvclo_w(a: v8i32) -> v8i32;
+    fn __lasx_xvclo_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvclo.d"]
-    fn __lasx_xvclo_d(a: v4i64) -> v4i64;
+    fn __lasx_xvclo_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvclz.b"]
-    fn __lasx_xvclz_b(a: v32i8) -> v32i8;
+    fn __lasx_xvclz_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvclz.h"]
-    fn __lasx_xvclz_h(a: v16i16) -> v16i16;
+    fn __lasx_xvclz_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvclz.w"]
-    fn __lasx_xvclz_w(a: v8i32) -> v8i32;
+    fn __lasx_xvclz_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvclz.d"]
-    fn __lasx_xvclz_d(a: v4i64) -> v4i64;
+    fn __lasx_xvclz_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfadd.s"]
-    fn __lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfadd_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfadd.d"]
-    fn __lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfadd_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfsub.s"]
-    fn __lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfsub_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfsub.d"]
-    fn __lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfsub_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmul.s"]
-    fn __lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmul_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmul.d"]
-    fn __lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmul_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfdiv.s"]
-    fn __lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfdiv_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfdiv.d"]
-    fn __lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfdiv_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfcvt.h.s"]
-    fn __lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16;
+    fn __lasx_xvfcvt_h_s(a: __v8f32, b: __v8f32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvfcvt.s.d"]
-    fn __lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32;
+    fn __lasx_xvfcvt_s_d(a: __v4f64, b: __v4f64) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmin.s"]
-    fn __lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmin_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmin.d"]
-    fn __lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmin_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmina.s"]
-    fn __lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmina_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmina.d"]
-    fn __lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmina_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmax.s"]
-    fn __lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmax_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmax.d"]
-    fn __lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmax_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmaxa.s"]
-    fn __lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32;
+    fn __lasx_xvfmaxa_s(a: __v8f32, b: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmaxa.d"]
-    fn __lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64;
+    fn __lasx_xvfmaxa_d(a: __v4f64, b: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfclass.s"]
-    fn __lasx_xvfclass_s(a: v8f32) -> v8i32;
+    fn __lasx_xvfclass_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfclass.d"]
-    fn __lasx_xvfclass_d(a: v4f64) -> v4i64;
+    fn __lasx_xvfclass_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfsqrt.s"]
-    fn __lasx_xvfsqrt_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfsqrt_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfsqrt.d"]
-    fn __lasx_xvfsqrt_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfsqrt_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrecip.s"]
-    fn __lasx_xvfrecip_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrecip_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrecip.d"]
-    fn __lasx_xvfrecip_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrecip_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrecipe.s"]
-    fn __lasx_xvfrecipe_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrecipe_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrecipe.d"]
-    fn __lasx_xvfrecipe_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrecipe_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrte.s"]
-    fn __lasx_xvfrsqrte_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrsqrte_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrte.d"]
-    fn __lasx_xvfrsqrte_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrsqrte_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrint.s"]
-    fn __lasx_xvfrint_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrint_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrint.d"]
-    fn __lasx_xvfrint_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrint_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrt.s"]
-    fn __lasx_xvfrsqrt_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrsqrt_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrsqrt.d"]
-    fn __lasx_xvfrsqrt_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrsqrt_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvflogb.s"]
-    fn __lasx_xvflogb_s(a: v8f32) -> v8f32;
+    fn __lasx_xvflogb_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvflogb.d"]
-    fn __lasx_xvflogb_d(a: v4f64) -> v4f64;
+    fn __lasx_xvflogb_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfcvth.s.h"]
-    fn __lasx_xvfcvth_s_h(a: v16i16) -> v8f32;
+    fn __lasx_xvfcvth_s_h(a: __v16i16) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfcvth.d.s"]
-    fn __lasx_xvfcvth_d_s(a: v8f32) -> v4f64;
+    fn __lasx_xvfcvth_d_s(a: __v8f32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfcvtl.s.h"]
-    fn __lasx_xvfcvtl_s_h(a: v16i16) -> v8f32;
+    fn __lasx_xvfcvtl_s_h(a: __v16i16) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfcvtl.d.s"]
-    fn __lasx_xvfcvtl_d_s(a: v8f32) -> v4f64;
+    fn __lasx_xvfcvtl_d_s(a: __v8f32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvftint.w.s"]
-    fn __lasx_xvftint_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftint_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftint.l.d"]
-    fn __lasx_xvftint_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftint_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftint.wu.s"]
-    fn __lasx_xvftint_wu_s(a: v8f32) -> v8u32;
+    fn __lasx_xvftint_wu_s(a: __v8f32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvftint.lu.d"]
-    fn __lasx_xvftint_lu_d(a: v4f64) -> v4u64;
+    fn __lasx_xvftint_lu_d(a: __v4f64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.w.s"]
-    fn __lasx_xvftintrz_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrz_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.l.d"]
-    fn __lasx_xvftintrz_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrz_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.wu.s"]
-    fn __lasx_xvftintrz_wu_s(a: v8f32) -> v8u32;
+    fn __lasx_xvftintrz_wu_s(a: __v8f32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.lu.d"]
-    fn __lasx_xvftintrz_lu_d(a: v4f64) -> v4u64;
+    fn __lasx_xvftintrz_lu_d(a: __v4f64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvffint.s.w"]
-    fn __lasx_xvffint_s_w(a: v8i32) -> v8f32;
+    fn __lasx_xvffint_s_w(a: __v8i32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvffint.d.l"]
-    fn __lasx_xvffint_d_l(a: v4i64) -> v4f64;
+    fn __lasx_xvffint_d_l(a: __v4i64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvffint.s.wu"]
-    fn __lasx_xvffint_s_wu(a: v8u32) -> v8f32;
+    fn __lasx_xvffint_s_wu(a: __v8u32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvffint.d.lu"]
-    fn __lasx_xvffint_d_lu(a: v4u64) -> v4f64;
+    fn __lasx_xvffint_d_lu(a: __v4u64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvreplve.b"]
-    fn __lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8;
+    fn __lasx_xvreplve_b(a: __v32i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvreplve.h"]
-    fn __lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16;
+    fn __lasx_xvreplve_h(a: __v16i16, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvreplve.w"]
-    fn __lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32;
+    fn __lasx_xvreplve_w(a: __v8i32, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplve.d"]
-    fn __lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64;
+    fn __lasx_xvreplve_d(a: __v4i64, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpermi.w"]
-    fn __lasx_xvpermi_w(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvpermi_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvandn.v"]
-    fn __lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvandn_v(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvneg.b"]
-    fn __lasx_xvneg_b(a: v32i8) -> v32i8;
+    fn __lasx_xvneg_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvneg.h"]
-    fn __lasx_xvneg_h(a: v16i16) -> v16i16;
+    fn __lasx_xvneg_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvneg.w"]
-    fn __lasx_xvneg_w(a: v8i32) -> v8i32;
+    fn __lasx_xvneg_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvneg.d"]
-    fn __lasx_xvneg_d(a: v4i64) -> v4i64;
+    fn __lasx_xvneg_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmuh.b"]
-    fn __lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvmuh_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmuh.h"]
-    fn __lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvmuh_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmuh.w"]
-    fn __lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvmuh_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmuh.d"]
-    fn __lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmuh_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmuh.bu"]
-    fn __lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8;
+    fn __lasx_xvmuh_bu(a: __v32u8, b: __v32u8) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvmuh.hu"]
-    fn __lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16;
+    fn __lasx_xvmuh_hu(a: __v16u16, b: __v16u16) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmuh.wu"]
-    fn __lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32;
+    fn __lasx_xvmuh_wu(a: __v8u32, b: __v8u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmuh.du"]
-    fn __lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvmuh_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.h.b"]
-    fn __lasx_xvsllwil_h_b(a: v32i8, b: u32) -> v16i16;
+    fn __lasx_xvsllwil_h_b(a: __v32i8, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.w.h"]
-    fn __lasx_xvsllwil_w_h(a: v16i16, b: u32) -> v8i32;
+    fn __lasx_xvsllwil_w_h(a: __v16i16, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.d.w"]
-    fn __lasx_xvsllwil_d_w(a: v8i32, b: u32) -> v4i64;
+    fn __lasx_xvsllwil_d_w(a: __v8i32, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.hu.bu"]
-    fn __lasx_xvsllwil_hu_bu(a: v32u8, b: u32) -> v16u16;
+    fn __lasx_xvsllwil_hu_bu(a: __v32u8, b: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.wu.hu"]
-    fn __lasx_xvsllwil_wu_hu(a: v16u16, b: u32) -> v8u32;
+    fn __lasx_xvsllwil_wu_hu(a: __v16u16, b: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsllwil.du.wu"]
-    fn __lasx_xvsllwil_du_wu(a: v8u32, b: u32) -> v4u64;
+    fn __lasx_xvsllwil_du_wu(a: __v8u32, b: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvsran.b.h"]
-    fn __lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsran_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsran.h.w"]
-    fn __lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsran_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsran.w.d"]
-    fn __lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsran_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssran.b.h"]
-    fn __lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssran_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssran.h.w"]
-    fn __lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssran_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssran.w.d"]
-    fn __lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssran_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssran.bu.h"]
-    fn __lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssran_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssran.hu.w"]
-    fn __lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssran_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssran.wu.d"]
-    fn __lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssran_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsrarn.b.h"]
-    fn __lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsrarn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrarn.h.w"]
-    fn __lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsrarn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrarn.w.d"]
-    fn __lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsrarn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.b.h"]
-    fn __lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssrarn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.h.w"]
-    fn __lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssrarn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.w.d"]
-    fn __lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssrarn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.bu.h"]
-    fn __lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssrarn_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.hu.w"]
-    fn __lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssrarn_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrarn.wu.d"]
-    fn __lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssrarn_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsrln.b.h"]
-    fn __lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsrln_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrln.h.w"]
-    fn __lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsrln_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrln.w.d"]
-    fn __lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsrln_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrln.bu.h"]
-    fn __lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssrln_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrln.hu.w"]
-    fn __lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssrln_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrln.wu.d"]
-    fn __lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssrln_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvsrlrn.b.h"]
-    fn __lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvsrlrn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlrn.h.w"]
-    fn __lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvsrlrn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlrn.w.d"]
-    fn __lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvsrlrn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.bu.h"]
-    fn __lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8;
+    fn __lasx_xvssrlrn_bu_h(a: __v16u16, b: __v16u16) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.hu.w"]
-    fn __lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16;
+    fn __lasx_xvssrlrn_hu_w(a: __v8u32, b: __v8u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.wu.d"]
-    fn __lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32;
+    fn __lasx_xvssrlrn_wu_d(a: __v4u64, b: __v4u64) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvfrstpi.b"]
-    fn __lasx_xvfrstpi_b(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvfrstpi_b(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvfrstpi.h"]
-    fn __lasx_xvfrstpi_h(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvfrstpi_h(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvfrstp.b"]
-    fn __lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8;
+    fn __lasx_xvfrstp_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvfrstp.h"]
-    fn __lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16;
+    fn __lasx_xvfrstp_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvshuf4i.d"]
-    fn __lasx_xvshuf4i_d(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvshuf4i_d(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvbsrl.v"]
-    fn __lasx_xvbsrl_v(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvbsrl_v(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvbsll.v"]
-    fn __lasx_xvbsll_v(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvbsll_v(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvextrins.b"]
-    fn __lasx_xvextrins_b(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvextrins_b(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvextrins.h"]
-    fn __lasx_xvextrins_h(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvextrins_h(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvextrins.w"]
-    fn __lasx_xvextrins_w(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvextrins_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvextrins.d"]
-    fn __lasx_xvextrins_d(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvextrins_d(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.b"]
-    fn __lasx_xvmskltz_b(a: v32i8) -> v32i8;
+    fn __lasx_xvmskltz_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.h"]
-    fn __lasx_xvmskltz_h(a: v16i16) -> v16i16;
+    fn __lasx_xvmskltz_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.w"]
-    fn __lasx_xvmskltz_w(a: v8i32) -> v8i32;
+    fn __lasx_xvmskltz_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmskltz.d"]
-    fn __lasx_xvmskltz_d(a: v4i64) -> v4i64;
+    fn __lasx_xvmskltz_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.b"]
-    fn __lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvsigncov_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.h"]
-    fn __lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvsigncov_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.w"]
-    fn __lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvsigncov_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsigncov.d"]
-    fn __lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsigncov_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfmadd.s"]
-    fn __lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfmadd_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmadd.d"]
-    fn __lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfmadd_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfmsub.s"]
-    fn __lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfmsub_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfmsub.d"]
-    fn __lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfmsub_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfnmadd.s"]
-    fn __lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfnmadd_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfnmadd.d"]
-    fn __lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfnmadd_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfnmsub.s"]
-    fn __lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32;
+    fn __lasx_xvfnmsub_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfnmsub.d"]
-    fn __lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64;
+    fn __lasx_xvfnmsub_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvftintrne.w.s"]
-    fn __lasx_xvftintrne_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrne_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrne.l.d"]
-    fn __lasx_xvftintrne_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrne_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrp.w.s"]
-    fn __lasx_xvftintrp_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrp_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrp.l.d"]
-    fn __lasx_xvftintrp_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrp_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrm.w.s"]
-    fn __lasx_xvftintrm_w_s(a: v8f32) -> v8i32;
+    fn __lasx_xvftintrm_w_s(a: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrm.l.d"]
-    fn __lasx_xvftintrm_l_d(a: v4f64) -> v4i64;
+    fn __lasx_xvftintrm_l_d(a: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftint.w.d"]
-    fn __lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftint_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvffint.s.l"]
-    fn __lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32;
+    fn __lasx_xvffint_s_l(a: __v4i64, b: __v4i64) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvftintrz.w.d"]
-    fn __lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrz_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrp.w.d"]
-    fn __lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrp_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrm.w.d"]
-    fn __lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrm_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftintrne.w.d"]
-    fn __lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32;
+    fn __lasx_xvftintrne_w_d(a: __v4f64, b: __v4f64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvftinth.l.s"]
-    fn __lasx_xvftinth_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftinth_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintl.l.s"]
-    fn __lasx_xvftintl_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintl_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvffinth.d.w"]
-    fn __lasx_xvffinth_d_w(a: v8i32) -> v4f64;
+    fn __lasx_xvffinth_d_w(a: __v8i32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvffintl.d.w"]
-    fn __lasx_xvffintl_d_w(a: v8i32) -> v4f64;
+    fn __lasx_xvffintl_d_w(a: __v8i32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvftintrzh.l.s"]
-    fn __lasx_xvftintrzh_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrzh_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrzl.l.s"]
-    fn __lasx_xvftintrzl_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrzl_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrph.l.s"]
-    fn __lasx_xvftintrph_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrph_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrpl.l.s"]
-    fn __lasx_xvftintrpl_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrpl_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrmh.l.s"]
-    fn __lasx_xvftintrmh_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrmh_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrml.l.s"]
-    fn __lasx_xvftintrml_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrml_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrneh.l.s"]
-    fn __lasx_xvftintrneh_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrneh_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvftintrnel.l.s"]
-    fn __lasx_xvftintrnel_l_s(a: v8f32) -> v4i64;
+    fn __lasx_xvftintrnel_l_s(a: __v8f32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrne.s"]
-    fn __lasx_xvfrintrne_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrne_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrne.d"]
-    fn __lasx_xvfrintrne_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrne_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrz.s"]
-    fn __lasx_xvfrintrz_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrz_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrz.d"]
-    fn __lasx_xvfrintrz_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrz_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrp.s"]
-    fn __lasx_xvfrintrp_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrp_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrp.d"]
-    fn __lasx_xvfrintrp_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrp_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvfrintrm.s"]
-    fn __lasx_xvfrintrm_s(a: v8f32) -> v8f32;
+    fn __lasx_xvfrintrm_s(a: __v8f32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvfrintrm.d"]
-    fn __lasx_xvfrintrm_d(a: v4f64) -> v4f64;
+    fn __lasx_xvfrintrm_d(a: __v4f64) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvld"]
-    fn __lasx_xvld(a: *const i8, b: i32) -> v32i8;
+    fn __lasx_xvld(a: *const i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvst"]
-    fn __lasx_xvst(a: v32i8, b: *mut i8, c: i32);
+    fn __lasx_xvst(a: __v32i8, b: *mut i8, c: i32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.b"]
-    fn __lasx_xvstelm_b(a: v32i8, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_b(a: __v32i8, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.h"]
-    fn __lasx_xvstelm_h(a: v16i16, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_h(a: __v16i16, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.w"]
-    fn __lasx_xvstelm_w(a: v8i32, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_w(a: __v8i32, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvstelm.d"]
-    fn __lasx_xvstelm_d(a: v4i64, b: *mut i8, c: i32, d: u32);
+    fn __lasx_xvstelm_d(a: __v4i64, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lasx.xvinsve0.w"]
-    fn __lasx_xvinsve0_w(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvinsve0_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvinsve0.d"]
-    fn __lasx_xvinsve0_d(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvinsve0_d(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickve.w"]
-    fn __lasx_xvpickve_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvpickve_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickve.d"]
-    fn __lasx_xvpickve_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvpickve_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.b.h"]
-    fn __lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssrlrn_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.h.w"]
-    fn __lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssrlrn_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrn.w.d"]
-    fn __lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssrlrn_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrln.b.h"]
-    fn __lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8;
+    fn __lasx_xvssrln_b_h(a: __v16i16, b: __v16i16) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrln.h.w"]
-    fn __lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16;
+    fn __lasx_xvssrln_h_w(a: __v8i32, b: __v8i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrln.w.d"]
-    fn __lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32;
+    fn __lasx_xvssrln_w_d(a: __v4i64, b: __v4i64) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvorn.v"]
-    fn __lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvorn_v(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvldi"]
-    fn __lasx_xvldi(a: i32) -> v4i64;
+    fn __lasx_xvldi(a: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvldx"]
-    fn __lasx_xvldx(a: *const i8, b: i64) -> v32i8;
+    fn __lasx_xvldx(a: *const i8, b: i64) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvstx"]
-    fn __lasx_xvstx(a: v32i8, b: *mut i8, c: i64);
+    fn __lasx_xvstx(a: __v32i8, b: *mut i8, c: i64);
     #[link_name = "llvm.loongarch.lasx.xvextl.qu.du"]
-    fn __lasx_xvextl_qu_du(a: v4u64) -> v4u64;
+    fn __lasx_xvextl_qu_du(a: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.w"]
-    fn __lasx_xvinsgr2vr_w(a: v8i32, b: i32, c: u32) -> v8i32;
+    fn __lasx_xvinsgr2vr_w(a: __v8i32, b: i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.d"]
-    fn __lasx_xvinsgr2vr_d(a: v4i64, b: i64, c: u32) -> v4i64;
+    fn __lasx_xvinsgr2vr_d(a: __v4i64, b: i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.b"]
-    fn __lasx_xvreplve0_b(a: v32i8) -> v32i8;
+    fn __lasx_xvreplve0_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.h"]
-    fn __lasx_xvreplve0_h(a: v16i16) -> v16i16;
+    fn __lasx_xvreplve0_h(a: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.w"]
-    fn __lasx_xvreplve0_w(a: v8i32) -> v8i32;
+    fn __lasx_xvreplve0_w(a: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.d"]
-    fn __lasx_xvreplve0_d(a: v4i64) -> v4i64;
+    fn __lasx_xvreplve0_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvreplve0.q"]
-    fn __lasx_xvreplve0_q(a: v32i8) -> v32i8;
+    fn __lasx_xvreplve0_q(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.vext2xv.h.b"]
-    fn __lasx_vext2xv_h_b(a: v32i8) -> v16i16;
+    fn __lasx_vext2xv_h_b(a: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.vext2xv.w.h"]
-    fn __lasx_vext2xv_w_h(a: v16i16) -> v8i32;
+    fn __lasx_vext2xv_w_h(a: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.d.w"]
-    fn __lasx_vext2xv_d_w(a: v8i32) -> v4i64;
+    fn __lasx_vext2xv_d_w(a: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.w.b"]
-    fn __lasx_vext2xv_w_b(a: v32i8) -> v8i32;
+    fn __lasx_vext2xv_w_b(a: __v32i8) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.d.h"]
-    fn __lasx_vext2xv_d_h(a: v16i16) -> v4i64;
+    fn __lasx_vext2xv_d_h(a: __v16i16) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.d.b"]
-    fn __lasx_vext2xv_d_b(a: v32i8) -> v4i64;
+    fn __lasx_vext2xv_d_b(a: __v32i8) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.hu.bu"]
-    fn __lasx_vext2xv_hu_bu(a: v32i8) -> v16i16;
+    fn __lasx_vext2xv_hu_bu(a: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.vext2xv.wu.hu"]
-    fn __lasx_vext2xv_wu_hu(a: v16i16) -> v8i32;
+    fn __lasx_vext2xv_wu_hu(a: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.du.wu"]
-    fn __lasx_vext2xv_du_wu(a: v8i32) -> v4i64;
+    fn __lasx_vext2xv_du_wu(a: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.wu.bu"]
-    fn __lasx_vext2xv_wu_bu(a: v32i8) -> v8i32;
+    fn __lasx_vext2xv_wu_bu(a: __v32i8) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.vext2xv.du.hu"]
-    fn __lasx_vext2xv_du_hu(a: v16i16) -> v4i64;
+    fn __lasx_vext2xv_du_hu(a: __v16i16) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.vext2xv.du.bu"]
-    fn __lasx_vext2xv_du_bu(a: v32i8) -> v4i64;
+    fn __lasx_vext2xv_du_bu(a: __v32i8) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpermi.q"]
-    fn __lasx_xvpermi_q(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvpermi_q(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvpermi.d"]
-    fn __lasx_xvpermi_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvpermi_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvperm.w"]
-    fn __lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvperm_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.b"]
-    fn __lasx_xvldrepl_b(a: *const i8, b: i32) -> v32i8;
+    fn __lasx_xvldrepl_b(a: *const i8, b: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.h"]
-    fn __lasx_xvldrepl_h(a: *const i8, b: i32) -> v16i16;
+    fn __lasx_xvldrepl_h(a: *const i8, b: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.w"]
-    fn __lasx_xvldrepl_w(a: *const i8, b: i32) -> v8i32;
+    fn __lasx_xvldrepl_w(a: *const i8, b: i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvldrepl.d"]
-    fn __lasx_xvldrepl_d(a: *const i8, b: i32) -> v4i64;
+    fn __lasx_xvldrepl_d(a: *const i8, b: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.w"]
-    fn __lasx_xvpickve2gr_w(a: v8i32, b: u32) -> i32;
+    fn __lasx_xvpickve2gr_w(a: __v8i32, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.wu"]
-    fn __lasx_xvpickve2gr_wu(a: v8i32, b: u32) -> u32;
+    fn __lasx_xvpickve2gr_wu(a: __v8i32, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.d"]
-    fn __lasx_xvpickve2gr_d(a: v4i64, b: u32) -> i64;
+    fn __lasx_xvpickve2gr_d(a: __v4i64, b: u32) -> i64;
     #[link_name = "llvm.loongarch.lasx.xvpickve2gr.du"]
-    fn __lasx_xvpickve2gr_du(a: v4i64, b: u32) -> u64;
+    fn __lasx_xvpickve2gr_du(a: __v4i64, b: u32) -> u64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.q.d"]
-    fn __lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.d.w"]
-    fn __lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwev_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.w.h"]
-    fn __lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwev_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.h.b"]
-    fn __lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwev_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.q.du"]
-    fn __lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvaddwev_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.d.wu"]
-    fn __lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvaddwev_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.w.hu"]
-    fn __lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvaddwev_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.h.bu"]
-    fn __lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvaddwev_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.q.d"]
-    fn __lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsubwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.d.w"]
-    fn __lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvsubwev_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.w.h"]
-    fn __lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvsubwev_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.h.b"]
-    fn __lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvsubwev_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.q.du"]
-    fn __lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvsubwev_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.d.wu"]
-    fn __lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvsubwev_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.w.hu"]
-    fn __lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvsubwev_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwev.h.bu"]
-    fn __lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvsubwev_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.q.d"]
-    fn __lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.d.w"]
-    fn __lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwev_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.w.h"]
-    fn __lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwev_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.h.b"]
-    fn __lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwev_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.q.du"]
-    fn __lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvmulwev_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.d.wu"]
-    fn __lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvmulwev_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.w.hu"]
-    fn __lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvmulwev_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.h.bu"]
-    fn __lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvmulwev_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.q.d"]
-    fn __lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwod_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.d.w"]
-    fn __lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwod_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.w.h"]
-    fn __lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwod_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.h.b"]
-    fn __lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwod_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.q.du"]
-    fn __lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvaddwod_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.d.wu"]
-    fn __lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvaddwod_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.w.hu"]
-    fn __lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvaddwod_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.h.bu"]
-    fn __lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvaddwod_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.q.d"]
-    fn __lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsubwod_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.d.w"]
-    fn __lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvsubwod_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.w.h"]
-    fn __lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvsubwod_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.h.b"]
-    fn __lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvsubwod_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.q.du"]
-    fn __lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvsubwod_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.d.wu"]
-    fn __lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvsubwod_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.w.hu"]
-    fn __lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvsubwod_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsubwod.h.bu"]
-    fn __lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvsubwod_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.q.d"]
-    fn __lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwod_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.d.w"]
-    fn __lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwod_d_w(a: __v8i32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.w.h"]
-    fn __lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwod_w_h(a: __v16i16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.h.b"]
-    fn __lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwod_h_b(a: __v32i8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.q.du"]
-    fn __lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64;
+    fn __lasx_xvmulwod_q_du(a: __v4u64, b: __v4u64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.d.wu"]
-    fn __lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64;
+    fn __lasx_xvmulwod_d_wu(a: __v8u32, b: __v8u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.w.hu"]
-    fn __lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32;
+    fn __lasx_xvmulwod_w_hu(a: __v16u16, b: __v16u16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.h.bu"]
-    fn __lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16;
+    fn __lasx_xvmulwod_h_bu(a: __v32u8, b: __v32u8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.d.wu.w"]
-    fn __lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwev_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.w.hu.h"]
-    fn __lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwev_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.h.bu.b"]
-    fn __lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwev_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.d.wu.w"]
-    fn __lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwev_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.w.hu.h"]
-    fn __lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwev_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.h.bu.b"]
-    fn __lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwev_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.d.wu.w"]
-    fn __lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvaddwod_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.w.hu.h"]
-    fn __lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvaddwod_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.h.bu.b"]
-    fn __lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvaddwod_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.d.wu.w"]
-    fn __lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64;
+    fn __lasx_xvmulwod_d_wu_w(a: __v8u32, b: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.w.hu.h"]
-    fn __lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32;
+    fn __lasx_xvmulwod_w_hu_h(a: __v16u16, b: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.h.bu.b"]
-    fn __lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16;
+    fn __lasx_xvmulwod_h_bu_b(a: __v32u8, b: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.q.d"]
-    fn __lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvhaddw_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhaddw.qu.du"]
-    fn __lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvhaddw_qu_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.q.d"]
-    fn __lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvhsubw_q_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvhsubw.qu.du"]
-    fn __lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64;
+    fn __lasx_xvhsubw_qu_du(a: __v4u64, b: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.d"]
-    fn __lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwev_q_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.w"]
-    fn __lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwev_d_w(a: __v4i64, b: __v8i32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.h"]
-    fn __lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwev_w_h(a: __v8i32, b: __v16i16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.b"]
-    fn __lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwev_h_b(a: __v16i16, b: __v32i8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.du"]
-    fn __lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64;
+    fn __lasx_xvmaddwev_q_du(a: __v4u64, b: __v4u64, c: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.wu"]
-    fn __lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64;
+    fn __lasx_xvmaddwev_d_wu(a: __v4u64, b: __v8u32, c: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.hu"]
-    fn __lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32;
+    fn __lasx_xvmaddwev_w_hu(a: __v8u32, b: __v16u16, c: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.bu"]
-    fn __lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16;
+    fn __lasx_xvmaddwev_h_bu(a: __v16u16, b: __v32u8, c: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.d"]
-    fn __lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwod_q_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.w"]
-    fn __lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwod_d_w(a: __v4i64, b: __v8i32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.h"]
-    fn __lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwod_w_h(a: __v8i32, b: __v16i16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.b"]
-    fn __lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwod_h_b(a: __v16i16, b: __v32i8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.du"]
-    fn __lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64;
+    fn __lasx_xvmaddwod_q_du(a: __v4u64, b: __v4u64, c: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.wu"]
-    fn __lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64;
+    fn __lasx_xvmaddwod_d_wu(a: __v4u64, b: __v8u32, c: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.hu"]
-    fn __lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32;
+    fn __lasx_xvmaddwod_w_hu(a: __v8u32, b: __v16u16, c: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.bu"]
-    fn __lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16;
+    fn __lasx_xvmaddwod_h_bu(a: __v16u16, b: __v32u8, c: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.q.du.d"]
-    fn __lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwev_q_du_d(a: __v4i64, b: __v4u64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.d.wu.w"]
-    fn __lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwev_d_wu_w(a: __v4i64, b: __v8u32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.w.hu.h"]
-    fn __lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwev_w_hu_h(a: __v8i32, b: __v16u16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwev.h.bu.b"]
-    fn __lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwev_h_bu_b(a: __v16i16, b: __v32u8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.q.du.d"]
-    fn __lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64;
+    fn __lasx_xvmaddwod_q_du_d(a: __v4i64, b: __v4u64, c: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.d.wu.w"]
-    fn __lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64;
+    fn __lasx_xvmaddwod_d_wu_w(a: __v4i64, b: __v8u32, c: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.w.hu.h"]
-    fn __lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32;
+    fn __lasx_xvmaddwod_w_hu_h(a: __v8i32, b: __v16u16, c: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvmaddwod.h.bu.b"]
-    fn __lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16;
+    fn __lasx_xvmaddwod_h_bu_b(a: __v16i16, b: __v32u8, c: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrotr.b"]
-    fn __lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8;
+    fn __lasx_xvrotr_b(a: __v32i8, b: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrotr.h"]
-    fn __lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16;
+    fn __lasx_xvrotr_h(a: __v16i16, b: __v16i16) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrotr.w"]
-    fn __lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32;
+    fn __lasx_xvrotr_w(a: __v8i32, b: __v8i32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvrotr.d"]
-    fn __lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvrotr_d(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvadd.q"]
-    fn __lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvadd_q(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsub.q"]
-    fn __lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64;
+    fn __lasx_xvsub_q(a: __v4i64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwev.q.du.d"]
-    fn __lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwev_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvaddwod.q.du.d"]
-    fn __lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvaddwod_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwev.q.du.d"]
-    fn __lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwev_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmulwod.q.du.d"]
-    fn __lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64;
+    fn __lasx_xvmulwod_q_du_d(a: __v4u64, b: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvmskgez.b"]
-    fn __lasx_xvmskgez_b(a: v32i8) -> v32i8;
+    fn __lasx_xvmskgez_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvmsknz.b"]
-    fn __lasx_xvmsknz_b(a: v32i8) -> v32i8;
+    fn __lasx_xvmsknz_b(a: __v32i8) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvexth.h.b"]
-    fn __lasx_xvexth_h_b(a: v32i8) -> v16i16;
+    fn __lasx_xvexth_h_b(a: __v32i8) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvexth.w.h"]
-    fn __lasx_xvexth_w_h(a: v16i16) -> v8i32;
+    fn __lasx_xvexth_w_h(a: __v16i16) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvexth.d.w"]
-    fn __lasx_xvexth_d_w(a: v8i32) -> v4i64;
+    fn __lasx_xvexth_d_w(a: __v8i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvexth.q.d"]
-    fn __lasx_xvexth_q_d(a: v4i64) -> v4i64;
+    fn __lasx_xvexth_q_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvexth.hu.bu"]
-    fn __lasx_xvexth_hu_bu(a: v32u8) -> v16u16;
+    fn __lasx_xvexth_hu_bu(a: __v32u8) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvexth.wu.hu"]
-    fn __lasx_xvexth_wu_hu(a: v16u16) -> v8u32;
+    fn __lasx_xvexth_wu_hu(a: __v16u16) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvexth.du.wu"]
-    fn __lasx_xvexth_du_wu(a: v8u32) -> v4u64;
+    fn __lasx_xvexth_du_wu(a: __v8u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvexth.qu.du"]
-    fn __lasx_xvexth_qu_du(a: v4u64) -> v4u64;
+    fn __lasx_xvexth_qu_du(a: __v4u64) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvrotri.b"]
-    fn __lasx_xvrotri_b(a: v32i8, b: u32) -> v32i8;
+    fn __lasx_xvrotri_b(a: __v32i8, b: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrotri.h"]
-    fn __lasx_xvrotri_h(a: v16i16, b: u32) -> v16i16;
+    fn __lasx_xvrotri_h(a: __v16i16, b: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrotri.w"]
-    fn __lasx_xvrotri_w(a: v8i32, b: u32) -> v8i32;
+    fn __lasx_xvrotri_w(a: __v8i32, b: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvrotri.d"]
-    fn __lasx_xvrotri_d(a: v4i64, b: u32) -> v4i64;
+    fn __lasx_xvrotri_d(a: __v4i64, b: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvextl.q.d"]
-    fn __lasx_xvextl_q_d(a: v4i64) -> v4i64;
+    fn __lasx_xvextl_q_d(a: __v4i64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.b.h"]
-    fn __lasx_xvsrlni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrlni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.h.w"]
-    fn __lasx_xvsrlni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrlni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.w.d"]
-    fn __lasx_xvsrlni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrlni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlni.d.q"]
-    fn __lasx_xvsrlni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrlni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.b.h"]
-    fn __lasx_xvsrlrni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrlrni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.h.w"]
-    fn __lasx_xvsrlrni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrlrni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.w.d"]
-    fn __lasx_xvsrlrni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrlrni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrlrni.d.q"]
-    fn __lasx_xvsrlrni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrlrni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.b.h"]
-    fn __lasx_xvssrlni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrlni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.h.w"]
-    fn __lasx_xvssrlni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrlni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.w.d"]
-    fn __lasx_xvssrlni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrlni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.d.q"]
-    fn __lasx_xvssrlni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrlni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.bu.h"]
-    fn __lasx_xvssrlni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrlni_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.hu.w"]
-    fn __lasx_xvssrlni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrlni_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.wu.d"]
-    fn __lasx_xvssrlni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrlni_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrlni.du.q"]
-    fn __lasx_xvssrlni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrlni_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.b.h"]
-    fn __lasx_xvssrlrni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrlrni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.h.w"]
-    fn __lasx_xvssrlrni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrlrni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.w.d"]
-    fn __lasx_xvssrlrni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrlrni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.d.q"]
-    fn __lasx_xvssrlrni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrlrni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.bu.h"]
-    fn __lasx_xvssrlrni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrlrni_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.hu.w"]
-    fn __lasx_xvssrlrni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrlrni_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.wu.d"]
-    fn __lasx_xvssrlrni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrlrni_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrlrni.du.q"]
-    fn __lasx_xvssrlrni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrlrni_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvsrani.b.h"]
-    fn __lasx_xvsrani_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrani_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrani.h.w"]
-    fn __lasx_xvsrani_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrani_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrani.w.d"]
-    fn __lasx_xvsrani_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrani_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrani.d.q"]
-    fn __lasx_xvsrani_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrani_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.b.h"]
-    fn __lasx_xvsrarni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvsrarni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.h.w"]
-    fn __lasx_xvsrarni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvsrarni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.w.d"]
-    fn __lasx_xvsrarni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvsrarni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvsrarni.d.q"]
-    fn __lasx_xvsrarni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvsrarni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrani.b.h"]
-    fn __lasx_xvssrani_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrani_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrani.h.w"]
-    fn __lasx_xvssrani_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrani_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrani.w.d"]
-    fn __lasx_xvssrani_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrani_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrani.d.q"]
-    fn __lasx_xvssrani_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrani_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrani.bu.h"]
-    fn __lasx_xvssrani_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrani_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrani.hu.w"]
-    fn __lasx_xvssrani_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrani_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrani.wu.d"]
-    fn __lasx_xvssrani_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrani_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrani.du.q"]
-    fn __lasx_xvssrani_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrani_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.b.h"]
-    fn __lasx_xvssrarni_b_h(a: v32i8, b: v32i8, c: u32) -> v32i8;
+    fn __lasx_xvssrarni_b_h(a: __v32i8, b: __v32i8, c: u32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.h.w"]
-    fn __lasx_xvssrarni_h_w(a: v16i16, b: v16i16, c: u32) -> v16i16;
+    fn __lasx_xvssrarni_h_w(a: __v16i16, b: __v16i16, c: u32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.w.d"]
-    fn __lasx_xvssrarni_w_d(a: v8i32, b: v8i32, c: u32) -> v8i32;
+    fn __lasx_xvssrarni_w_d(a: __v8i32, b: __v8i32, c: u32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.d.q"]
-    fn __lasx_xvssrarni_d_q(a: v4i64, b: v4i64, c: u32) -> v4i64;
+    fn __lasx_xvssrarni_d_q(a: __v4i64, b: __v4i64, c: u32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.bu.h"]
-    fn __lasx_xvssrarni_bu_h(a: v32u8, b: v32i8, c: u32) -> v32u8;
+    fn __lasx_xvssrarni_bu_h(a: __v32u8, b: __v32i8, c: u32) -> __v32u8;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.hu.w"]
-    fn __lasx_xvssrarni_hu_w(a: v16u16, b: v16i16, c: u32) -> v16u16;
+    fn __lasx_xvssrarni_hu_w(a: __v16u16, b: __v16i16, c: u32) -> __v16u16;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.wu.d"]
-    fn __lasx_xvssrarni_wu_d(a: v8u32, b: v8i32, c: u32) -> v8u32;
+    fn __lasx_xvssrarni_wu_d(a: __v8u32, b: __v8i32, c: u32) -> __v8u32;
     #[link_name = "llvm.loongarch.lasx.xvssrarni.du.q"]
-    fn __lasx_xvssrarni_du_q(a: v4u64, b: v4i64, c: u32) -> v4u64;
+    fn __lasx_xvssrarni_du_q(a: __v4u64, b: __v4i64, c: u32) -> __v4u64;
     #[link_name = "llvm.loongarch.lasx.xbnz.b"]
-    fn __lasx_xbnz_b(a: v32u8) -> i32;
+    fn __lasx_xbnz_b(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.d"]
-    fn __lasx_xbnz_d(a: v4u64) -> i32;
+    fn __lasx_xbnz_d(a: __v4u64) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.h"]
-    fn __lasx_xbnz_h(a: v16u16) -> i32;
+    fn __lasx_xbnz_h(a: __v16u16) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.v"]
-    fn __lasx_xbnz_v(a: v32u8) -> i32;
+    fn __lasx_xbnz_v(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbnz.w"]
-    fn __lasx_xbnz_w(a: v8u32) -> i32;
+    fn __lasx_xbnz_w(a: __v8u32) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.b"]
-    fn __lasx_xbz_b(a: v32u8) -> i32;
+    fn __lasx_xbz_b(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.d"]
-    fn __lasx_xbz_d(a: v4u64) -> i32;
+    fn __lasx_xbz_d(a: __v4u64) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.h"]
-    fn __lasx_xbz_h(a: v16u16) -> i32;
+    fn __lasx_xbz_h(a: __v16u16) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.v"]
-    fn __lasx_xbz_v(a: v32u8) -> i32;
+    fn __lasx_xbz_v(a: __v32u8) -> i32;
     #[link_name = "llvm.loongarch.lasx.xbz.w"]
-    fn __lasx_xbz_w(a: v8u32) -> i32;
+    fn __lasx_xbz_w(a: __v8u32) -> i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.caf.d"]
-    fn __lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_caf_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.caf.s"]
-    fn __lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_caf_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.ceq.d"]
-    fn __lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_ceq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.ceq.s"]
-    fn __lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_ceq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cle.d"]
-    fn __lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cle_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cle.s"]
-    fn __lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cle_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.clt.d"]
-    fn __lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_clt_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.clt.s"]
-    fn __lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_clt_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cne.d"]
-    fn __lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cne_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cne.s"]
-    fn __lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cne_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cor.d"]
-    fn __lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cor_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cor.s"]
-    fn __lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cor_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cueq.d"]
-    fn __lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cueq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cueq.s"]
-    fn __lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cueq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cule.d"]
-    fn __lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cule_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cule.s"]
-    fn __lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cule_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cult.d"]
-    fn __lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cult_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cult.s"]
-    fn __lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cult_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cun.d"]
-    fn __lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cun_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cune.d"]
-    fn __lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_cune_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cune.s"]
-    fn __lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cune_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.cun.s"]
-    fn __lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_cun_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.saf.d"]
-    fn __lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_saf_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.saf.s"]
-    fn __lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_saf_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.seq.d"]
-    fn __lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_seq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.seq.s"]
-    fn __lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_seq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sle.d"]
-    fn __lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sle_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sle.s"]
-    fn __lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sle_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.slt.d"]
-    fn __lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_slt_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.slt.s"]
-    fn __lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_slt_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sne.d"]
-    fn __lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sne_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sne.s"]
-    fn __lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sne_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sor.d"]
-    fn __lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sor_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sor.s"]
-    fn __lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sor_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sueq.d"]
-    fn __lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sueq_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sueq.s"]
-    fn __lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sueq_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sule.d"]
-    fn __lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sule_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sule.s"]
-    fn __lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sule_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sult.d"]
-    fn __lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sult_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sult.s"]
-    fn __lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sult_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sun.d"]
-    fn __lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sun_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sune.d"]
-    fn __lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64;
+    fn __lasx_xvfcmp_sune_d(a: __v4f64, b: __v4f64) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sune.s"]
-    fn __lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sune_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvfcmp.sun.s"]
-    fn __lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32;
+    fn __lasx_xvfcmp_sun_s(a: __v8f32, b: __v8f32) -> __v8i32;
     #[link_name = "llvm.loongarch.lasx.xvpickve.d.f"]
-    fn __lasx_xvpickve_d_f(a: v4f64, b: u32) -> v4f64;
+    fn __lasx_xvpickve_d_f(a: __v4f64, b: u32) -> __v4f64;
     #[link_name = "llvm.loongarch.lasx.xvpickve.w.f"]
-    fn __lasx_xvpickve_w_f(a: v8f32, b: u32) -> v8f32;
+    fn __lasx_xvpickve_w_f(a: __v8f32, b: u32) -> __v8f32;
     #[link_name = "llvm.loongarch.lasx.xvrepli.b"]
-    fn __lasx_xvrepli_b(a: i32) -> v32i8;
+    fn __lasx_xvrepli_b(a: i32) -> __v32i8;
     #[link_name = "llvm.loongarch.lasx.xvrepli.d"]
-    fn __lasx_xvrepli_d(a: i32) -> v4i64;
+    fn __lasx_xvrepli_d(a: i32) -> __v4i64;
     #[link_name = "llvm.loongarch.lasx.xvrepli.h"]
-    fn __lasx_xvrepli_h(a: i32) -> v16i16;
+    fn __lasx_xvrepli_h(a: i32) -> __v16i16;
     #[link_name = "llvm.loongarch.lasx.xvrepli.w"]
-    fn __lasx_xvrepli_w(a: i32) -> v8i32;
+    fn __lasx_xvrepli_w(a: i32) -> __v8i32;
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsll_b(a, b) }
+pub fn lasx_xvsll_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsll_h(a, b) }
+pub fn lasx_xvsll_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsll_w(a, b) }
+pub fn lasx_xvsll_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsll_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsll_d(a, b) }
+pub fn lasx_xvsll_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsll_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvslli_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvslli_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvslli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvslli_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvslli_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvslli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvslli_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslli_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvslli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslli_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvslli_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvslli_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvslli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsra_b(a, b) }
+pub fn lasx_xvsra_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsra_h(a, b) }
+pub fn lasx_xvsra_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsra_w(a, b) }
+pub fn lasx_xvsra_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsra_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsra_d(a, b) }
+pub fn lasx_xvsra_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsra_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrai_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrai_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrai_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrai_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrai_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrai_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrai_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrai_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrai_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrai_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrai_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrai_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrai_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsrar_b(a, b) }
+pub fn lasx_xvsrar_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsrar_h(a, b) }
+pub fn lasx_xvsrar_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsrar_w(a, b) }
+pub fn lasx_xvsrar_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrar_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsrar_d(a, b) }
+pub fn lasx_xvsrar_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrar_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrari_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrari_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrari_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrari_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrari_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrari_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrari_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrari_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrari_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrari_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrari_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrari_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrari_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsrl_b(a, b) }
+pub fn lasx_xvsrl_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsrl_h(a, b) }
+pub fn lasx_xvsrl_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsrl_w(a, b) }
+pub fn lasx_xvsrl_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrl_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsrl_d(a, b) }
+pub fn lasx_xvsrl_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrli_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrli_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrli_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrli_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrli_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrli_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrli_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrli_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrli_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsrlr_b(a, b) }
+pub fn lasx_xvsrlr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsrlr_h(a, b) }
+pub fn lasx_xvsrlr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsrlr_w(a, b) }
+pub fn lasx_xvsrlr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlr_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsrlr_d(a, b) }
+pub fn lasx_xvsrlr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsrlri_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsrlri_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsrlri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsrlri_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrlri_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsrlri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsrlri_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrlri_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsrlri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlri_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsrlri_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrlri_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsrlri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_b(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitclr_b(a, b) }
+pub fn lasx_xvbitclr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_h(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvbitclr_h(a, b) }
+pub fn lasx_xvbitclr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_w(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvbitclr_w(a, b) }
+pub fn lasx_xvbitclr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclr_d(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvbitclr_d(a, b) }
+pub fn lasx_xvbitclr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitclr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_b<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvbitclri_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvbitclri_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvbitclri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_h<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvbitclri_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvbitclri_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvbitclri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_w<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvbitclri_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbitclri_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvbitclri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitclri_d<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvbitclri_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvbitclri_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvbitclri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_b(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitset_b(a, b) }
+pub fn lasx_xvbitset_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_h(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvbitset_h(a, b) }
+pub fn lasx_xvbitset_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_w(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvbitset_w(a, b) }
+pub fn lasx_xvbitset_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitset_d(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvbitset_d(a, b) }
+pub fn lasx_xvbitset_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitset_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_b<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvbitseti_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvbitseti_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvbitseti_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_h<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvbitseti_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvbitseti_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvbitseti_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_w<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvbitseti_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbitseti_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvbitseti_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseti_d<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvbitseti_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvbitseti_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvbitseti_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_b(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitrev_b(a, b) }
+pub fn lasx_xvbitrev_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_h(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvbitrev_h(a, b) }
+pub fn lasx_xvbitrev_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_w(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvbitrev_w(a, b) }
+pub fn lasx_xvbitrev_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrev_d(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvbitrev_d(a, b) }
+pub fn lasx_xvbitrev_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitrev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_b<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvbitrevi_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvbitrevi_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvbitrevi_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_h<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvbitrevi_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvbitrevi_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvbitrevi_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_w<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvbitrevi_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbitrevi_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvbitrevi_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitrevi_d<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvbitrevi_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvbitrevi_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvbitrevi_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvadd_b(a, b) }
+pub fn lasx_xvadd_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvadd_h(a, b) }
+pub fn lasx_xvadd_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvadd_w(a, b) }
+pub fn lasx_xvadd_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvadd_d(a, b) }
+pub fn lasx_xvadd_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_bu<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvaddi_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_hu<const IMM5: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvaddi_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_wu<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvaddi_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddi_du<const IMM5: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvaddi_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvaddi_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvaddi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsub_b(a, b) }
+pub fn lasx_xvsub_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsub_h(a, b) }
+pub fn lasx_xvsub_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsub_w(a, b) }
+pub fn lasx_xvsub_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsub_d(a, b) }
+pub fn lasx_xvsub_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_bu<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsubi_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_hu<const IMM5: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsubi_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_wu<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsubi_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubi_du<const IMM5: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsubi_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsubi_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvsubi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmax_b(a, b) }
+pub fn lasx_xvmax_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmax_h(a, b) }
+pub fn lasx_xvmax_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmax_w(a, b) }
+pub fn lasx_xvmax_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmax_d(a, b) }
+pub fn lasx_xvmax_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvmaxi_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvmaxi_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvmaxi_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvmaxi_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmaxi_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmaxi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmax_bu(a, b) }
+pub fn lasx_xvmax_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmax_hu(a, b) }
+pub fn lasx_xvmax_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmax_wu(a, b) }
+pub fn lasx_xvmax_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmax_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmax_du(a, b) }
+pub fn lasx_xvmax_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmax_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_bu<const IMM5: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvmaxi_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_hu<const IMM5: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvmaxi_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_wu<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvmaxi_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaxi_du<const IMM5: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvmaxi_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmaxi_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvmaxi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmin_b(a, b) }
+pub fn lasx_xvmin_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmin_h(a, b) }
+pub fn lasx_xvmin_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmin_w(a, b) }
+pub fn lasx_xvmin_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmin_d(a, b) }
+pub fn lasx_xvmin_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvmini_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvmini_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvmini_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvmini_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvmini_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvmini_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmin_bu(a, b) }
+pub fn lasx_xvmin_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmin_hu(a, b) }
+pub fn lasx_xvmin_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmin_wu(a, b) }
+pub fn lasx_xvmin_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmin_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmin_du(a, b) }
+pub fn lasx_xvmin_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmin_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_bu<const IMM5: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvmini_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_hu<const IMM5: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvmini_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_wu<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvmini_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmini_du<const IMM5: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvmini_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvmini_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvmini_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvseq_b(a, b) }
+pub fn lasx_xvseq_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvseq_h(a, b) }
+pub fn lasx_xvseq_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvseq_w(a, b) }
+pub fn lasx_xvseq_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseq_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvseq_d(a, b) }
+pub fn lasx_xvseq_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvseq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvseqi_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvseqi_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvseqi_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvseqi_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvseqi_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvseqi_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvseqi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvslt_b(a, b) }
+pub fn lasx_xvslt_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvslt_h(a, b) }
+pub fn lasx_xvslt_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvslt_w(a, b) }
+pub fn lasx_xvslt_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvslt_d(a, b) }
+pub fn lasx_xvslt_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvslti_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvslti_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvslti_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvslti_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslti_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslti_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_bu(a: v32u8, b: v32u8) -> v32i8 {
-    unsafe { __lasx_xvslt_bu(a, b) }
+pub fn lasx_xvslt_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_hu(a: v16u16, b: v16u16) -> v16i16 {
-    unsafe { __lasx_xvslt_hu(a, b) }
+pub fn lasx_xvslt_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_wu(a: v8u32, b: v8u32) -> v8i32 {
-    unsafe { __lasx_xvslt_wu(a, b) }
+pub fn lasx_xvslt_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslt_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvslt_du(a, b) }
+pub fn lasx_xvslt_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvslt_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_bu<const IMM5: u32>(a: v32u8) -> v32i8 {
+pub fn lasx_xvslti_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_hu<const IMM5: u32>(a: v16u16) -> v16i16 {
+pub fn lasx_xvslti_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_wu<const IMM5: u32>(a: v8u32) -> v8i32 {
+pub fn lasx_xvslti_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslti_du<const IMM5: u32>(a: v4u64) -> v4i64 {
+pub fn lasx_xvslti_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslti_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvslti_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsle_b(a, b) }
+pub fn lasx_xvsle_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsle_h(a, b) }
+pub fn lasx_xvsle_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsle_w(a, b) }
+pub fn lasx_xvsle_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsle_d(a, b) }
+pub fn lasx_xvsle_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_b<const IMM_S5: i32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvslei_b<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_b(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_h<const IMM_S5: i32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvslei_h<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_h(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_w<const IMM_S5: i32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvslei_w<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_w(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_d<const IMM_S5: i32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvslei_d<const IMM_S5: i32>(a: m256i) -> m256i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lasx_xvslei_d(a, IMM_S5) }
+    unsafe { transmute(__lasx_xvslei_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_bu(a: v32u8, b: v32u8) -> v32i8 {
-    unsafe { __lasx_xvsle_bu(a, b) }
+pub fn lasx_xvsle_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_hu(a: v16u16, b: v16u16) -> v16i16 {
-    unsafe { __lasx_xvsle_hu(a, b) }
+pub fn lasx_xvsle_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_wu(a: v8u32, b: v8u32) -> v8i32 {
-    unsafe { __lasx_xvsle_wu(a, b) }
+pub fn lasx_xvsle_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsle_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvsle_du(a, b) }
+pub fn lasx_xvsle_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsle_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_bu<const IMM5: u32>(a: v32u8) -> v32i8 {
+pub fn lasx_xvslei_bu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_bu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_hu<const IMM5: u32>(a: v16u16) -> v16i16 {
+pub fn lasx_xvslei_hu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_hu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_wu<const IMM5: u32>(a: v8u32) -> v8i32 {
+pub fn lasx_xvslei_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvslei_du<const IMM5: u32>(a: v4u64) -> v4i64 {
+pub fn lasx_xvslei_du<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvslei_du(a, IMM5) }
+    unsafe { transmute(__lasx_xvslei_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvsat_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsat_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsat_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvsat_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsat_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsat_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvsat_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsat_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsat_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvsat_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsat_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvsat_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_bu<const IMM3: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvsat_bu<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsat_bu(a, IMM3) }
+    unsafe { transmute(__lasx_xvsat_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_hu<const IMM4: u32>(a: v16u16) -> v16u16 {
+pub fn lasx_xvsat_hu<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsat_hu(a, IMM4) }
+    unsafe { transmute(__lasx_xvsat_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_wu<const IMM5: u32>(a: v8u32) -> v8u32 {
+pub fn lasx_xvsat_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsat_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsat_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsat_du<const IMM6: u32>(a: v4u64) -> v4u64 {
+pub fn lasx_xvsat_du<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsat_du(a, IMM6) }
+    unsafe { transmute(__lasx_xvsat_du(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvadda_b(a, b) }
+pub fn lasx_xvadda_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvadda_h(a, b) }
+pub fn lasx_xvadda_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvadda_w(a, b) }
+pub fn lasx_xvadda_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadda_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvadda_d(a, b) }
+pub fn lasx_xvadda_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadda_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsadd_b(a, b) }
+pub fn lasx_xvsadd_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsadd_h(a, b) }
+pub fn lasx_xvsadd_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsadd_w(a, b) }
+pub fn lasx_xvsadd_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsadd_d(a, b) }
+pub fn lasx_xvsadd_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvsadd_bu(a, b) }
+pub fn lasx_xvsadd_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvsadd_hu(a, b) }
+pub fn lasx_xvsadd_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvsadd_wu(a, b) }
+pub fn lasx_xvsadd_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsadd_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvsadd_du(a, b) }
+pub fn lasx_xvsadd_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsadd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvavg_b(a, b) }
+pub fn lasx_xvavg_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvavg_h(a, b) }
+pub fn lasx_xvavg_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvavg_w(a, b) }
+pub fn lasx_xvavg_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvavg_d(a, b) }
+pub fn lasx_xvavg_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvavg_bu(a, b) }
+pub fn lasx_xvavg_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvavg_hu(a, b) }
+pub fn lasx_xvavg_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvavg_wu(a, b) }
+pub fn lasx_xvavg_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavg_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvavg_du(a, b) }
+pub fn lasx_xvavg_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavg_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvavgr_b(a, b) }
+pub fn lasx_xvavgr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvavgr_h(a, b) }
+pub fn lasx_xvavgr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvavgr_w(a, b) }
+pub fn lasx_xvavgr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvavgr_d(a, b) }
+pub fn lasx_xvavgr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvavgr_bu(a, b) }
+pub fn lasx_xvavgr_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvavgr_hu(a, b) }
+pub fn lasx_xvavgr_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvavgr_wu(a, b) }
+pub fn lasx_xvavgr_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvavgr_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvavgr_du(a, b) }
+pub fn lasx_xvavgr_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvavgr_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvssub_b(a, b) }
+pub fn lasx_xvssub_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvssub_h(a, b) }
+pub fn lasx_xvssub_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvssub_w(a, b) }
+pub fn lasx_xvssub_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvssub_d(a, b) }
+pub fn lasx_xvssub_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvssub_bu(a, b) }
+pub fn lasx_xvssub_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvssub_hu(a, b) }
+pub fn lasx_xvssub_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvssub_wu(a, b) }
+pub fn lasx_xvssub_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssub_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvssub_du(a, b) }
+pub fn lasx_xvssub_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssub_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvabsd_b(a, b) }
+pub fn lasx_xvabsd_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvabsd_h(a, b) }
+pub fn lasx_xvabsd_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvabsd_w(a, b) }
+pub fn lasx_xvabsd_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvabsd_d(a, b) }
+pub fn lasx_xvabsd_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvabsd_bu(a, b) }
+pub fn lasx_xvabsd_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvabsd_hu(a, b) }
+pub fn lasx_xvabsd_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvabsd_wu(a, b) }
+pub fn lasx_xvabsd_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvabsd_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvabsd_du(a, b) }
+pub fn lasx_xvabsd_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvabsd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmul_b(a, b) }
+pub fn lasx_xvmul_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmul_h(a, b) }
+pub fn lasx_xvmul_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmul_w(a, b) }
+pub fn lasx_xvmul_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmul_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmul_d(a, b) }
+pub fn lasx_xvmul_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmadd_b(a, b, c) }
+pub fn lasx_xvmadd_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmadd_h(a, b, c) }
+pub fn lasx_xvmadd_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmadd_w(a, b, c) }
+pub fn lasx_xvmadd_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmadd_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmadd_d(a, b, c) }
+pub fn lasx_xvmadd_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmsub_b(a, b, c) }
+pub fn lasx_xvmsub_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmsub_h(a, b, c) }
+pub fn lasx_xvmsub_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmsub_w(a, b, c) }
+pub fn lasx_xvmsub_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsub_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmsub_d(a, b, c) }
+pub fn lasx_xvmsub_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvdiv_b(a, b) }
+pub fn lasx_xvdiv_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvdiv_h(a, b) }
+pub fn lasx_xvdiv_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvdiv_w(a, b) }
+pub fn lasx_xvdiv_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvdiv_d(a, b) }
+pub fn lasx_xvdiv_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvdiv_bu(a, b) }
+pub fn lasx_xvdiv_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvdiv_hu(a, b) }
+pub fn lasx_xvdiv_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvdiv_wu(a, b) }
+pub fn lasx_xvdiv_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvdiv_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvdiv_du(a, b) }
+pub fn lasx_xvdiv_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvdiv_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvhaddw_h_b(a, b) }
+pub fn lasx_xvhaddw_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvhaddw_w_h(a, b) }
+pub fn lasx_xvhaddw_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvhaddw_d_w(a, b) }
+pub fn lasx_xvhaddw_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_hu_bu(a: v32u8, b: v32u8) -> v16u16 {
-    unsafe { __lasx_xvhaddw_hu_bu(a, b) }
+pub fn lasx_xvhaddw_hu_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_wu_hu(a: v16u16, b: v16u16) -> v8u32 {
-    unsafe { __lasx_xvhaddw_wu_hu(a, b) }
+pub fn lasx_xvhaddw_wu_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_du_wu(a: v8u32, b: v8u32) -> v4u64 {
-    unsafe { __lasx_xvhaddw_du_wu(a, b) }
+pub fn lasx_xvhaddw_du_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvhsubw_h_b(a, b) }
+pub fn lasx_xvhsubw_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvhsubw_w_h(a, b) }
+pub fn lasx_xvhsubw_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvhsubw_d_w(a, b) }
+pub fn lasx_xvhsubw_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_hu_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvhsubw_hu_bu(a, b) }
+pub fn lasx_xvhsubw_hu_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_wu_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvhsubw_wu_hu(a, b) }
+pub fn lasx_xvhsubw_wu_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_du_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvhsubw_du_wu(a, b) }
+pub fn lasx_xvhsubw_du_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmod_b(a, b) }
+pub fn lasx_xvmod_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmod_h(a, b) }
+pub fn lasx_xvmod_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmod_w(a, b) }
+pub fn lasx_xvmod_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmod_d(a, b) }
+pub fn lasx_xvmod_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmod_bu(a, b) }
+pub fn lasx_xvmod_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmod_hu(a, b) }
+pub fn lasx_xvmod_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmod_wu(a, b) }
+pub fn lasx_xvmod_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmod_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmod_du(a, b) }
+pub fn lasx_xvmod_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmod_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_b<const IMM4: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvrepl128vei_b<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvrepl128vei_b(a, IMM4) }
+    unsafe { transmute(__lasx_xvrepl128vei_b(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_h<const IMM3: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvrepl128vei_h<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvrepl128vei_h(a, IMM3) }
+    unsafe { transmute(__lasx_xvrepl128vei_h(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_w<const IMM2: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvrepl128vei_w<const IMM2: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvrepl128vei_w(a, IMM2) }
+    unsafe { transmute(__lasx_xvrepl128vei_w(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepl128vei_d<const IMM1: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvrepl128vei_d<const IMM1: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lasx_xvrepl128vei_d(a, IMM1) }
+    unsafe { transmute(__lasx_xvrepl128vei_d(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpickev_b(a, b) }
+pub fn lasx_xvpickev_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpickev_h(a, b) }
+pub fn lasx_xvpickev_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpickev_w(a, b) }
+pub fn lasx_xvpickev_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickev_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpickev_d(a, b) }
+pub fn lasx_xvpickev_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpickod_b(a, b) }
+pub fn lasx_xvpickod_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpickod_h(a, b) }
+pub fn lasx_xvpickod_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpickod_w(a, b) }
+pub fn lasx_xvpickod_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickod_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpickod_d(a, b) }
+pub fn lasx_xvpickod_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpickod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvilvh_b(a, b) }
+pub fn lasx_xvilvh_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvilvh_h(a, b) }
+pub fn lasx_xvilvh_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvilvh_w(a, b) }
+pub fn lasx_xvilvh_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvh_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvilvh_d(a, b) }
+pub fn lasx_xvilvh_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvilvl_b(a, b) }
+pub fn lasx_xvilvl_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvilvl_h(a, b) }
+pub fn lasx_xvilvl_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvilvl_w(a, b) }
+pub fn lasx_xvilvl_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvilvl_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvilvl_d(a, b) }
+pub fn lasx_xvilvl_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvilvl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpackev_b(a, b) }
+pub fn lasx_xvpackev_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpackev_h(a, b) }
+pub fn lasx_xvpackev_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpackev_w(a, b) }
+pub fn lasx_xvpackev_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackev_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpackev_d(a, b) }
+pub fn lasx_xvpackev_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpackod_b(a, b) }
+pub fn lasx_xvpackod_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpackod_h(a, b) }
+pub fn lasx_xvpackod_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpackod_w(a, b) }
+pub fn lasx_xvpackod_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpackod_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpackod_d(a, b) }
+pub fn lasx_xvpackod_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpackod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvshuf_b(a, b, c) }
+pub fn lasx_xvshuf_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvshuf_h(a, b, c) }
+pub fn lasx_xvshuf_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_w(a: v8i32, b: v8i32, c: v8i32) -> v8i32 {
-    unsafe { __lasx_xvshuf_w(a, b, c) }
+pub fn lasx_xvshuf_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvshuf_d(a, b, c) }
+pub fn lasx_xvshuf_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvand_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvand_v(a, b) }
+pub fn lasx_xvand_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvand_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvandi_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvandi_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvandi_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvandi_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvor_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvor_v(a, b) }
+pub fn lasx_xvor_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvori_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvori_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvori_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvnor_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvnor_v(a, b) }
+pub fn lasx_xvnor_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvnor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvnori_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvnori_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvnori_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvnori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvxor_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvxor_v(a, b) }
+pub fn lasx_xvxor_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvxor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvxori_b<const IMM8: u32>(a: v32u8) -> v32u8 {
+pub fn lasx_xvxori_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvxori_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvxori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitsel_v(a: v32u8, b: v32u8, c: v32u8) -> v32u8 {
-    unsafe { __lasx_xvbitsel_v(a, b, c) }
+pub fn lasx_xvbitsel_v(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbitseli_b<const IMM8: u32>(a: v32u8, b: v32u8) -> v32u8 {
+pub fn lasx_xvbitseli_b<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvbitseli_b(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvbitseli_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_b<const IMM8: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvshuf4i_b<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_b(a, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_h<const IMM8: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvshuf4i_h<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_h(a, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_h(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_w<const IMM8: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvshuf4i_w<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_w(a, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_w(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_b(a: i32) -> v32i8 {
-    unsafe { __lasx_xvreplgr2vr_b(a) }
+pub fn lasx_xvreplgr2vr_b(a: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_h(a: i32) -> v16i16 {
-    unsafe { __lasx_xvreplgr2vr_h(a) }
+pub fn lasx_xvreplgr2vr_h(a: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_w(a: i32) -> v8i32 {
-    unsafe { __lasx_xvreplgr2vr_w(a) }
+pub fn lasx_xvreplgr2vr_w(a: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplgr2vr_d(a: i64) -> v4i64 {
-    unsafe { __lasx_xvreplgr2vr_d(a) }
+pub fn lasx_xvreplgr2vr_d(a: i64) -> m256i {
+    unsafe { transmute(__lasx_xvreplgr2vr_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvpcnt_b(a) }
+pub fn lasx_xvpcnt_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvpcnt_h(a) }
+pub fn lasx_xvpcnt_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvpcnt_w(a) }
+pub fn lasx_xvpcnt_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpcnt_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvpcnt_d(a) }
+pub fn lasx_xvpcnt_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvpcnt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvclo_b(a) }
+pub fn lasx_xvclo_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvclo_h(a) }
+pub fn lasx_xvclo_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvclo_w(a) }
+pub fn lasx_xvclo_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclo_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvclo_d(a) }
+pub fn lasx_xvclo_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclo_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvclz_b(a) }
+pub fn lasx_xvclz_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvclz_h(a) }
+pub fn lasx_xvclz_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvclz_w(a) }
+pub fn lasx_xvclz_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvclz_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvclz_d(a) }
+pub fn lasx_xvclz_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvclz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfadd_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfadd_s(a, b) }
+pub fn lasx_xvfadd_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfadd_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfadd_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfadd_d(a, b) }
+pub fn lasx_xvfadd_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsub_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfsub_s(a, b) }
+pub fn lasx_xvfsub_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfsub_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsub_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfsub_d(a, b) }
+pub fn lasx_xvfsub_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmul_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmul_s(a, b) }
+pub fn lasx_xvfmul_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmul_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmul_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmul_d(a, b) }
+pub fn lasx_xvfmul_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfdiv_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfdiv_s(a, b) }
+pub fn lasx_xvfdiv_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfdiv_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfdiv_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfdiv_d(a, b) }
+pub fn lasx_xvfdiv_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvt_h_s(a: v8f32, b: v8f32) -> v16i16 {
-    unsafe { __lasx_xvfcvt_h_s(a, b) }
+pub fn lasx_xvfcvt_h_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcvt_h_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvt_s_d(a: v4f64, b: v4f64) -> v8f32 {
-    unsafe { __lasx_xvfcvt_s_d(a, b) }
+pub fn lasx_xvfcvt_s_d(a: m256d, b: m256d) -> m256 {
+    unsafe { transmute(__lasx_xvfcvt_s_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmin_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmin_s(a, b) }
+pub fn lasx_xvfmin_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmin_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmin_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmin_d(a, b) }
+pub fn lasx_xvfmin_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmina_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmina_s(a, b) }
+pub fn lasx_xvfmina_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmina_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmina_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmina_d(a, b) }
+pub fn lasx_xvfmina_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmina_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmax_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmax_s(a, b) }
+pub fn lasx_xvfmax_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmax_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmax_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmax_d(a, b) }
+pub fn lasx_xvfmax_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmaxa_s(a: v8f32, b: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmaxa_s(a, b) }
+pub fn lasx_xvfmaxa_s(a: m256, b: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmaxa_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmaxa_d(a: v4f64, b: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmaxa_d(a, b) }
+pub fn lasx_xvfmaxa_d(a: m256d, b: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmaxa_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfclass_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfclass_s(a) }
+pub fn lasx_xvfclass_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfclass_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfclass_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfclass_d(a) }
+pub fn lasx_xvfclass_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfclass_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsqrt_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfsqrt_s(a) }
+pub fn lasx_xvfsqrt_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfsqrt_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfsqrt_d(a) }
+pub fn lasx_xvfsqrt_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecip_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrecip_s(a) }
+pub fn lasx_xvfrecip_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrecip_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecip_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrecip_d(a) }
+pub fn lasx_xvfrecip_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrecip_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecipe_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrecipe_s(a) }
+pub fn lasx_xvfrecipe_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrecipe_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrecipe_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrecipe_d(a) }
+pub fn lasx_xvfrecipe_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrecipe_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrte_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrsqrte_s(a) }
+pub fn lasx_xvfrsqrte_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrsqrte_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrte_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrsqrte_d(a) }
+pub fn lasx_xvfrsqrte_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrsqrte_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrint_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrint_s(a) }
+pub fn lasx_xvfrint_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrint_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrint_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrint_d(a) }
+pub fn lasx_xvfrint_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrint_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrt_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrsqrt_s(a) }
+pub fn lasx_xvfrsqrt_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrsqrt_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrsqrt_d(a) }
+pub fn lasx_xvfrsqrt_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvflogb_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvflogb_s(a) }
+pub fn lasx_xvflogb_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvflogb_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvflogb_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvflogb_d(a) }
+pub fn lasx_xvflogb_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvflogb_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvth_s_h(a: v16i16) -> v8f32 {
-    unsafe { __lasx_xvfcvth_s_h(a) }
+pub fn lasx_xvfcvth_s_h(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvfcvth_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvth_d_s(a: v8f32) -> v4f64 {
-    unsafe { __lasx_xvfcvth_d_s(a) }
+pub fn lasx_xvfcvth_d_s(a: m256) -> m256d {
+    unsafe { transmute(__lasx_xvfcvth_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvtl_s_h(a: v16i16) -> v8f32 {
-    unsafe { __lasx_xvfcvtl_s_h(a) }
+pub fn lasx_xvfcvtl_s_h(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvfcvtl_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcvtl_d_s(a: v8f32) -> v4f64 {
-    unsafe { __lasx_xvfcvtl_d_s(a) }
+pub fn lasx_xvfcvtl_d_s(a: m256) -> m256d {
+    unsafe { transmute(__lasx_xvfcvtl_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftint_w_s(a) }
+pub fn lasx_xvftint_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftint_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftint_l_d(a) }
+pub fn lasx_xvftint_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftint_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_wu_s(a: v8f32) -> v8u32 {
-    unsafe { __lasx_xvftint_wu_s(a) }
+pub fn lasx_xvftint_wu_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftint_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_lu_d(a: v4f64) -> v4u64 {
-    unsafe { __lasx_xvftint_lu_d(a) }
+pub fn lasx_xvftint_lu_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftint_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrz_w_s(a) }
+pub fn lasx_xvftintrz_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrz_l_d(a) }
+pub fn lasx_xvftintrz_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_wu_s(a: v8f32) -> v8u32 {
-    unsafe { __lasx_xvftintrz_wu_s(a) }
+pub fn lasx_xvftintrz_wu_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_lu_d(a: v4f64) -> v4u64 {
-    unsafe { __lasx_xvftintrz_lu_d(a) }
+pub fn lasx_xvftintrz_lu_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_s_w(a: v8i32) -> v8f32 {
-    unsafe { __lasx_xvffint_s_w(a) }
+pub fn lasx_xvffint_s_w(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvffint_s_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_d_l(a: v4i64) -> v4f64 {
-    unsafe { __lasx_xvffint_d_l(a) }
+pub fn lasx_xvffint_d_l(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffint_d_l(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_s_wu(a: v8u32) -> v8f32 {
-    unsafe { __lasx_xvffint_s_wu(a) }
+pub fn lasx_xvffint_s_wu(a: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvffint_s_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_d_lu(a: v4u64) -> v4f64 {
-    unsafe { __lasx_xvffint_d_lu(a) }
+pub fn lasx_xvffint_d_lu(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffint_d_lu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_b(a: v32i8, b: i32) -> v32i8 {
-    unsafe { __lasx_xvreplve_b(a, b) }
+pub fn lasx_xvreplve_b(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_h(a: v16i16, b: i32) -> v16i16 {
-    unsafe { __lasx_xvreplve_h(a, b) }
+pub fn lasx_xvreplve_h(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_w(a: v8i32, b: i32) -> v8i32 {
-    unsafe { __lasx_xvreplve_w(a, b) }
+pub fn lasx_xvreplve_w(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve_d(a: v4i64, b: i32) -> v4i64 {
-    unsafe { __lasx_xvreplve_d(a, b) }
+pub fn lasx_xvreplve_d(a: m256i, b: i32) -> m256i {
+    unsafe { transmute(__lasx_xvreplve_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpermi_w<const IMM8: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvpermi_w<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvpermi_w(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvpermi_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvandn_v(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvandn_v(a, b) }
+pub fn lasx_xvandn_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvandn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvneg_b(a) }
+pub fn lasx_xvneg_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvneg_h(a) }
+pub fn lasx_xvneg_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvneg_w(a) }
+pub fn lasx_xvneg_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvneg_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvneg_d(a) }
+pub fn lasx_xvneg_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvneg_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmuh_b(a, b) }
+pub fn lasx_xvmuh_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmuh_h(a, b) }
+pub fn lasx_xvmuh_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmuh_w(a, b) }
+pub fn lasx_xvmuh_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmuh_d(a, b) }
+pub fn lasx_xvmuh_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_bu(a: v32u8, b: v32u8) -> v32u8 {
-    unsafe { __lasx_xvmuh_bu(a, b) }
+pub fn lasx_xvmuh_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_hu(a: v16u16, b: v16u16) -> v16u16 {
-    unsafe { __lasx_xvmuh_hu(a, b) }
+pub fn lasx_xvmuh_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_wu(a: v8u32, b: v8u32) -> v8u32 {
-    unsafe { __lasx_xvmuh_wu(a, b) }
+pub fn lasx_xvmuh_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmuh_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmuh_du(a, b) }
+pub fn lasx_xvmuh_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmuh_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_h_b<const IMM3: u32>(a: v32i8) -> v16i16 {
+pub fn lasx_xvsllwil_h_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsllwil_h_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvsllwil_h_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_w_h<const IMM4: u32>(a: v16i16) -> v8i32 {
+pub fn lasx_xvsllwil_w_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsllwil_w_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvsllwil_w_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_d_w<const IMM5: u32>(a: v8i32) -> v4i64 {
+pub fn lasx_xvsllwil_d_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsllwil_d_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvsllwil_d_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_hu_bu<const IMM3: u32>(a: v32u8) -> v16u16 {
+pub fn lasx_xvsllwil_hu_bu<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvsllwil_hu_bu(a, IMM3) }
+    unsafe { transmute(__lasx_xvsllwil_hu_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_wu_hu<const IMM4: u32>(a: v16u16) -> v8u32 {
+pub fn lasx_xvsllwil_wu_hu<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsllwil_wu_hu(a, IMM4) }
+    unsafe { transmute(__lasx_xvsllwil_wu_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsllwil_du_wu<const IMM5: u32>(a: v8u32) -> v4u64 {
+pub fn lasx_xvsllwil_du_wu<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsllwil_du_wu(a, IMM5) }
+    unsafe { transmute(__lasx_xvsllwil_du_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsran_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsran_b_h(a, b) }
+pub fn lasx_xvsran_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsran_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsran_h_w(a, b) }
+pub fn lasx_xvsran_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsran_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsran_w_d(a, b) }
+pub fn lasx_xvsran_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssran_b_h(a, b) }
+pub fn lasx_xvssran_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssran_h_w(a, b) }
+pub fn lasx_xvssran_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssran_w_d(a, b) }
+pub fn lasx_xvssran_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssran_bu_h(a, b) }
+pub fn lasx_xvssran_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssran_hu_w(a, b) }
+pub fn lasx_xvssran_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssran_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssran_wu_d(a, b) }
+pub fn lasx_xvssran_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssran_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsrarn_b_h(a, b) }
+pub fn lasx_xvsrarn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsrarn_h_w(a, b) }
+pub fn lasx_xvsrarn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsrarn_w_d(a, b) }
+pub fn lasx_xvsrarn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssrarn_b_h(a, b) }
+pub fn lasx_xvssrarn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssrarn_h_w(a, b) }
+pub fn lasx_xvssrarn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssrarn_w_d(a, b) }
+pub fn lasx_xvssrarn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssrarn_bu_h(a, b) }
+pub fn lasx_xvssrarn_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssrarn_hu_w(a, b) }
+pub fn lasx_xvssrarn_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarn_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssrarn_wu_d(a, b) }
+pub fn lasx_xvssrarn_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrarn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrln_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsrln_b_h(a, b) }
+pub fn lasx_xvsrln_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrln_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsrln_h_w(a, b) }
+pub fn lasx_xvsrln_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrln_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsrln_w_d(a, b) }
+pub fn lasx_xvsrln_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssrln_bu_h(a, b) }
+pub fn lasx_xvssrln_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssrln_hu_w(a, b) }
+pub fn lasx_xvssrln_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssrln_wu_d(a, b) }
+pub fn lasx_xvssrln_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvsrlrn_b_h(a, b) }
+pub fn lasx_xvsrlrn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvsrlrn_h_w(a, b) }
+pub fn lasx_xvsrlrn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvsrlrn_w_d(a, b) }
+pub fn lasx_xvsrlrn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_bu_h(a: v16u16, b: v16u16) -> v32u8 {
-    unsafe { __lasx_xvssrlrn_bu_h(a, b) }
+pub fn lasx_xvssrlrn_bu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_hu_w(a: v8u32, b: v8u32) -> v16u16 {
-    unsafe { __lasx_xvssrlrn_hu_w(a, b) }
+pub fn lasx_xvssrlrn_hu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_wu_d(a: v4u64, b: v4u64) -> v8u32 {
-    unsafe { __lasx_xvssrlrn_wu_d(a, b) }
+pub fn lasx_xvssrlrn_wu_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstpi_b<const IMM5: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvfrstpi_b<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvfrstpi_b(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvfrstpi_b(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstpi_h<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvfrstpi_h<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvfrstpi_h(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvfrstpi_h(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstp_b(a: v32i8, b: v32i8, c: v32i8) -> v32i8 {
-    unsafe { __lasx_xvfrstp_b(a, b, c) }
+pub fn lasx_xvfrstp_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvfrstp_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrstp_h(a: v16i16, b: v16i16, c: v16i16) -> v16i16 {
-    unsafe { __lasx_xvfrstp_h(a, b, c) }
+pub fn lasx_xvfrstp_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvfrstp_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvshuf4i_d<const IMM8: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvshuf4i_d<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvshuf4i_d(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvshuf4i_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbsrl_v<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvbsrl_v<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbsrl_v(a, IMM5) }
+    unsafe { transmute(__lasx_xvbsrl_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvbsll_v<const IMM5: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvbsll_v<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvbsll_v(a, IMM5) }
+    unsafe { transmute(__lasx_xvbsll_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_b<const IMM8: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvextrins_b<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_b(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_h<const IMM8: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvextrins_h<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_h(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_h(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_w<const IMM8: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvextrins_w<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_w(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextrins_d<const IMM8: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvextrins_d<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvextrins_d(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvextrins_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmskltz_b(a) }
+pub fn lasx_xvmskltz_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvmskltz_h(a) }
+pub fn lasx_xvmskltz_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvmskltz_w(a) }
+pub fn lasx_xvmskltz_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskltz_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmskltz_d(a) }
+pub fn lasx_xvmskltz_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskltz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvsigncov_b(a, b) }
+pub fn lasx_xvsigncov_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvsigncov_h(a, b) }
+pub fn lasx_xvsigncov_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvsigncov_w(a, b) }
+pub fn lasx_xvsigncov_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsigncov_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsigncov_d(a, b) }
+pub fn lasx_xvsigncov_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsigncov_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmadd_s(a, b, c) }
+pub fn lasx_xvfmadd_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmadd_d(a, b, c) }
+pub fn lasx_xvfmadd_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfmsub_s(a, b, c) }
+pub fn lasx_xvfmsub_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfmsub_d(a, b, c) }
+pub fn lasx_xvfmsub_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmadd_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfnmadd_s(a, b, c) }
+pub fn lasx_xvfnmadd_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfnmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmadd_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfnmadd_d(a, b, c) }
+pub fn lasx_xvfnmadd_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfnmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmsub_s(a: v8f32, b: v8f32, c: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfnmsub_s(a, b, c) }
+pub fn lasx_xvfnmsub_s(a: m256, b: m256, c: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfnmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfnmsub_d(a: v4f64, b: v4f64, c: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfnmsub_d(a, b, c) }
+pub fn lasx_xvfnmsub_d(a: m256d, b: m256d, c: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfnmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrne_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrne_w_s(a) }
+pub fn lasx_xvftintrne_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrne_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrne_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrne_l_d(a) }
+pub fn lasx_xvftintrne_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrne_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrp_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrp_w_s(a) }
+pub fn lasx_xvftintrp_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrp_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrp_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrp_l_d(a) }
+pub fn lasx_xvftintrp_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrp_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrm_w_s(a: v8f32) -> v8i32 {
-    unsafe { __lasx_xvftintrm_w_s(a) }
+pub fn lasx_xvftintrm_w_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrm_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrm_l_d(a: v4f64) -> v4i64 {
-    unsafe { __lasx_xvftintrm_l_d(a) }
+pub fn lasx_xvftintrm_l_d(a: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrm_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftint_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftint_w_d(a, b) }
+pub fn lasx_xvftint_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftint_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffint_s_l(a: v4i64, b: v4i64) -> v8f32 {
-    unsafe { __lasx_xvffint_s_l(a, b) }
+pub fn lasx_xvffint_s_l(a: m256i, b: m256i) -> m256 {
+    unsafe { transmute(__lasx_xvffint_s_l(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrz_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrz_w_d(a, b) }
+pub fn lasx_xvftintrz_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrz_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrp_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrp_w_d(a, b) }
+pub fn lasx_xvftintrp_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrp_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrm_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrm_w_d(a, b) }
+pub fn lasx_xvftintrm_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrm_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrne_w_d(a: v4f64, b: v4f64) -> v8i32 {
-    unsafe { __lasx_xvftintrne_w_d(a, b) }
+pub fn lasx_xvftintrne_w_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvftintrne_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftinth_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftinth_l_s(a) }
+pub fn lasx_xvftinth_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftinth_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintl_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintl_l_s(a) }
+pub fn lasx_xvftintl_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffinth_d_w(a: v8i32) -> v4f64 {
-    unsafe { __lasx_xvffinth_d_w(a) }
+pub fn lasx_xvffinth_d_w(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffinth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvffintl_d_w(a: v8i32) -> v4f64 {
-    unsafe { __lasx_xvffintl_d_w(a) }
+pub fn lasx_xvffintl_d_w(a: m256i) -> m256d {
+    unsafe { transmute(__lasx_xvffintl_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrzh_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrzh_l_s(a) }
+pub fn lasx_xvftintrzh_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrzh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrzl_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrzl_l_s(a) }
+pub fn lasx_xvftintrzl_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrzl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrph_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrph_l_s(a) }
+pub fn lasx_xvftintrph_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrph_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrpl_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrpl_l_s(a) }
+pub fn lasx_xvftintrpl_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrpl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrmh_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrmh_l_s(a) }
+pub fn lasx_xvftintrmh_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrmh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrml_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrml_l_s(a) }
+pub fn lasx_xvftintrml_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrml_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrneh_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrneh_l_s(a) }
+pub fn lasx_xvftintrneh_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrneh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvftintrnel_l_s(a: v8f32) -> v4i64 {
-    unsafe { __lasx_xvftintrnel_l_s(a) }
+pub fn lasx_xvftintrnel_l_s(a: m256) -> m256i {
+    unsafe { transmute(__lasx_xvftintrnel_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrne_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrne_s(a) }
+pub fn lasx_xvfrintrne_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrne_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrne_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrne_d(a) }
+pub fn lasx_xvfrintrne_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrne_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrz_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrz_s(a) }
+pub fn lasx_xvfrintrz_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrz_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrz_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrz_d(a) }
+pub fn lasx_xvfrintrz_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrp_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrp_s(a) }
+pub fn lasx_xvfrintrp_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrp_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrp_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrp_d(a) }
+pub fn lasx_xvfrintrp_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrp_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrm_s(a: v8f32) -> v8f32 {
-    unsafe { __lasx_xvfrintrm_s(a) }
+pub fn lasx_xvfrintrm_s(a: m256) -> m256 {
+    unsafe { transmute(__lasx_xvfrintrm_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfrintrm_d(a: v4f64) -> v4f64 {
-    unsafe { __lasx_xvfrintrm_d(a) }
+pub fn lasx_xvfrintrm_d(a: m256d) -> m256d {
+    unsafe { transmute(__lasx_xvfrintrm_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvld<const IMM_S12: i32>(mem_addr: *const i8) -> v32i8 {
+pub unsafe fn lasx_xvld<const IMM_S12: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lasx_xvld(mem_addr, IMM_S12)
+    transmute(__lasx_xvld(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvst<const IMM_S12: i32>(a: v32i8, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvst<const IMM_S12: i32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lasx_xvst(a, mem_addr, IMM_S12)
+    transmute(__lasx_xvst(transmute(a), mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_b<const IMM_S8: i32, const IMM4: u32>(a: v32i8, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_b<const IMM_S8: i32, const IMM4: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM4, 4);
-    __lasx_xvstelm_b(a, mem_addr, IMM_S8, IMM4)
+    transmute(__lasx_xvstelm_b(transmute(a), mem_addr, IMM_S8, IMM4))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_h<const IMM_S8: i32, const IMM3: u32>(a: v16i16, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_h<const IMM_S8: i32, const IMM3: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM3, 3);
-    __lasx_xvstelm_h(a, mem_addr, IMM_S8, IMM3)
+    transmute(__lasx_xvstelm_h(transmute(a), mem_addr, IMM_S8, IMM3))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_w<const IMM_S8: i32, const IMM2: u32>(a: v8i32, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_w<const IMM_S8: i32, const IMM2: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM2, 2);
-    __lasx_xvstelm_w(a, mem_addr, IMM_S8, IMM2)
+    transmute(__lasx_xvstelm_w(transmute(a), mem_addr, IMM_S8, IMM2))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstelm_d<const IMM_S8: i32, const IMM1: u32>(a: v4i64, mem_addr: *mut i8) {
+pub unsafe fn lasx_xvstelm_d<const IMM_S8: i32, const IMM1: u32>(a: m256i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM1, 1);
-    __lasx_xvstelm_d(a, mem_addr, IMM_S8, IMM1)
+    transmute(__lasx_xvstelm_d(transmute(a), mem_addr, IMM_S8, IMM1))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsve0_w<const IMM3: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvinsve0_w<const IMM3: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvinsve0_w(a, b, IMM3) }
+    unsafe { transmute(__lasx_xvinsve0_w(transmute(a), transmute(b), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsve0_d<const IMM2: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvinsve0_d<const IMM2: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvinsve0_d(a, b, IMM2) }
+    unsafe { transmute(__lasx_xvinsve0_d(transmute(a), transmute(b), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_w<const IMM3: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvpickve_w<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve_w(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve_w(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_d<const IMM2: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvpickve_d<const IMM2: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve_d(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve_d(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssrlrn_b_h(a, b) }
+pub fn lasx_xvssrlrn_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssrlrn_h_w(a, b) }
+pub fn lasx_xvssrlrn_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrn_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssrlrn_w_d(a, b) }
+pub fn lasx_xvssrlrn_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_b_h(a: v16i16, b: v16i16) -> v32i8 {
-    unsafe { __lasx_xvssrln_b_h(a, b) }
+pub fn lasx_xvssrln_b_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_h_w(a: v8i32, b: v8i32) -> v16i16 {
-    unsafe { __lasx_xvssrln_h_w(a, b) }
+pub fn lasx_xvssrln_h_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrln_w_d(a: v4i64, b: v4i64) -> v8i32 {
-    unsafe { __lasx_xvssrln_w_d(a, b) }
+pub fn lasx_xvssrln_w_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvssrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvorn_v(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvorn_v(a, b) }
+pub fn lasx_xvorn_v(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvorn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvldi<const IMM_S13: i32>() -> v4i64 {
+pub fn lasx_xvldi<const IMM_S13: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S13, 13);
-    unsafe { __lasx_xvldi(IMM_S13) }
+    unsafe { transmute(__lasx_xvldi(IMM_S13)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> v32i8 {
-    __lasx_xvldx(mem_addr, b)
+pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> m256i {
+    transmute(__lasx_xvldx(mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvstx(a: v32i8, mem_addr: *mut i8, b: i64) {
-    __lasx_xvstx(a, mem_addr, b)
+pub unsafe fn lasx_xvstx(a: m256i, mem_addr: *mut i8, b: i64) {
+    transmute(__lasx_xvstx(transmute(a), mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextl_qu_du(a: v4u64) -> v4u64 {
-    unsafe { __lasx_xvextl_qu_du(a) }
+pub fn lasx_xvextl_qu_du(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvextl_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsgr2vr_w<const IMM3: u32>(a: v8i32, b: i32) -> v8i32 {
+pub fn lasx_xvinsgr2vr_w<const IMM3: u32>(a: m256i, b: i32) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvinsgr2vr_w(a, b, IMM3) }
+    unsafe { transmute(__lasx_xvinsgr2vr_w(transmute(a), transmute(b), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvinsgr2vr_d<const IMM2: u32>(a: v4i64, b: i64) -> v4i64 {
+pub fn lasx_xvinsgr2vr_d<const IMM2: u32>(a: m256i, b: i64) -> m256i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvinsgr2vr_d(a, b, IMM2) }
+    unsafe { transmute(__lasx_xvinsgr2vr_d(transmute(a), transmute(b), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvreplve0_b(a) }
+pub fn lasx_xvreplve0_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_h(a: v16i16) -> v16i16 {
-    unsafe { __lasx_xvreplve0_h(a) }
+pub fn lasx_xvreplve0_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_w(a: v8i32) -> v8i32 {
-    unsafe { __lasx_xvreplve0_w(a) }
+pub fn lasx_xvreplve0_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvreplve0_d(a) }
+pub fn lasx_xvreplve0_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvreplve0_q(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvreplve0_q(a) }
+pub fn lasx_xvreplve0_q(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvreplve0_q(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_h_b(a: v32i8) -> v16i16 {
-    unsafe { __lasx_vext2xv_h_b(a) }
+pub fn lasx_vext2xv_h_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_h_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_w_h(a: v16i16) -> v8i32 {
-    unsafe { __lasx_vext2xv_w_h(a) }
+pub fn lasx_vext2xv_w_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_w_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_d_w(a: v8i32) -> v4i64 {
-    unsafe { __lasx_vext2xv_d_w(a) }
+pub fn lasx_vext2xv_d_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_w_b(a: v32i8) -> v8i32 {
-    unsafe { __lasx_vext2xv_w_b(a) }
+pub fn lasx_vext2xv_w_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_w_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_d_h(a: v16i16) -> v4i64 {
-    unsafe { __lasx_vext2xv_d_h(a) }
+pub fn lasx_vext2xv_d_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_d_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_d_b(a: v32i8) -> v4i64 {
-    unsafe { __lasx_vext2xv_d_b(a) }
+pub fn lasx_vext2xv_d_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_d_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_hu_bu(a: v32i8) -> v16i16 {
-    unsafe { __lasx_vext2xv_hu_bu(a) }
+pub fn lasx_vext2xv_hu_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_hu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_wu_hu(a: v16i16) -> v8i32 {
-    unsafe { __lasx_vext2xv_wu_hu(a) }
+pub fn lasx_vext2xv_wu_hu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_wu_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_du_wu(a: v8i32) -> v4i64 {
-    unsafe { __lasx_vext2xv_du_wu(a) }
+pub fn lasx_vext2xv_du_wu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_du_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_wu_bu(a: v32i8) -> v8i32 {
-    unsafe { __lasx_vext2xv_wu_bu(a) }
+pub fn lasx_vext2xv_wu_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_wu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_du_hu(a: v16i16) -> v4i64 {
-    unsafe { __lasx_vext2xv_du_hu(a) }
+pub fn lasx_vext2xv_du_hu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_du_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_vext2xv_du_bu(a: v32i8) -> v4i64 {
-    unsafe { __lasx_vext2xv_du_bu(a) }
+pub fn lasx_vext2xv_du_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_vext2xv_du_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpermi_q<const IMM8: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvpermi_q<const IMM8: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvpermi_q(a, b, IMM8) }
+    unsafe { transmute(__lasx_xvpermi_q(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpermi_d<const IMM8: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvpermi_d<const IMM8: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lasx_xvpermi_d(a, IMM8) }
+    unsafe { transmute(__lasx_xvpermi_d(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvperm_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvperm_w(a, b) }
+pub fn lasx_xvperm_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvperm_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> v32i8 {
+pub unsafe fn lasx_xvldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lasx_xvldrepl_b(mem_addr, IMM_S12)
+    transmute(__lasx_xvldrepl_b(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> v16i16 {
+pub unsafe fn lasx_xvldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S11, 11);
-    __lasx_xvldrepl_h(mem_addr, IMM_S11)
+    transmute(__lasx_xvldrepl_h(mem_addr, IMM_S11))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> v8i32 {
+pub unsafe fn lasx_xvldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    __lasx_xvldrepl_w(mem_addr, IMM_S10)
+    transmute(__lasx_xvldrepl_w(mem_addr, IMM_S10))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lasx_xvldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> v4i64 {
+pub unsafe fn lasx_xvldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> m256i {
     static_assert_simm_bits!(IMM_S9, 9);
-    __lasx_xvldrepl_d(mem_addr, IMM_S9)
+    transmute(__lasx_xvldrepl_d(mem_addr, IMM_S9))
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_w<const IMM3: u32>(a: v8i32) -> i32 {
+pub fn lasx_xvpickve2gr_w<const IMM3: u32>(a: m256i) -> i32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve2gr_w(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve2gr_w(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_wu<const IMM3: u32>(a: v8i32) -> u32 {
+pub fn lasx_xvpickve2gr_wu<const IMM3: u32>(a: m256i) -> u32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve2gr_wu(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve2gr_wu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_d<const IMM2: u32>(a: v4i64) -> i64 {
+pub fn lasx_xvpickve2gr_d<const IMM2: u32>(a: m256i) -> i64 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve2gr_d(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve2gr_d(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve2gr_du<const IMM2: u32>(a: v4i64) -> u64 {
+pub fn lasx_xvpickve2gr_du<const IMM2: u32>(a: m256i) -> u64 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve2gr_du(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve2gr_du(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwev_q_d(a, b) }
+pub fn lasx_xvaddwev_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwev_d_w(a, b) }
+pub fn lasx_xvaddwev_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwev_w_h(a, b) }
+pub fn lasx_xvaddwev_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwev_h_b(a, b) }
+pub fn lasx_xvaddwev_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvaddwev_q_du(a, b) }
+pub fn lasx_xvaddwev_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvaddwev_d_wu(a, b) }
+pub fn lasx_xvaddwev_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvaddwev_w_hu(a, b) }
+pub fn lasx_xvaddwev_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvaddwev_h_bu(a, b) }
+pub fn lasx_xvaddwev_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsubwev_q_d(a, b) }
+pub fn lasx_xvsubwev_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvsubwev_d_w(a, b) }
+pub fn lasx_xvsubwev_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvsubwev_w_h(a, b) }
+pub fn lasx_xvsubwev_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvsubwev_h_b(a, b) }
+pub fn lasx_xvsubwev_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvsubwev_q_du(a, b) }
+pub fn lasx_xvsubwev_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvsubwev_d_wu(a, b) }
+pub fn lasx_xvsubwev_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvsubwev_w_hu(a, b) }
+pub fn lasx_xvsubwev_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvsubwev_h_bu(a, b) }
+pub fn lasx_xvsubwev_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwev_q_d(a, b) }
+pub fn lasx_xvmulwev_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwev_d_w(a, b) }
+pub fn lasx_xvmulwev_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwev_w_h(a, b) }
+pub fn lasx_xvmulwev_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwev_h_b(a, b) }
+pub fn lasx_xvmulwev_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvmulwev_q_du(a, b) }
+pub fn lasx_xvmulwev_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvmulwev_d_wu(a, b) }
+pub fn lasx_xvmulwev_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvmulwev_w_hu(a, b) }
+pub fn lasx_xvmulwev_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvmulwev_h_bu(a, b) }
+pub fn lasx_xvmulwev_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwod_q_d(a, b) }
+pub fn lasx_xvaddwod_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwod_d_w(a, b) }
+pub fn lasx_xvaddwod_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwod_w_h(a, b) }
+pub fn lasx_xvaddwod_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwod_h_b(a, b) }
+pub fn lasx_xvaddwod_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvaddwod_q_du(a, b) }
+pub fn lasx_xvaddwod_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvaddwod_d_wu(a, b) }
+pub fn lasx_xvaddwod_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvaddwod_w_hu(a, b) }
+pub fn lasx_xvaddwod_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvaddwod_h_bu(a, b) }
+pub fn lasx_xvaddwod_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsubwod_q_d(a, b) }
+pub fn lasx_xvsubwod_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvsubwod_d_w(a, b) }
+pub fn lasx_xvsubwod_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvsubwod_w_h(a, b) }
+pub fn lasx_xvsubwod_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvsubwod_h_b(a, b) }
+pub fn lasx_xvsubwod_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvsubwod_q_du(a, b) }
+pub fn lasx_xvsubwod_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvsubwod_d_wu(a, b) }
+pub fn lasx_xvsubwod_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvsubwod_w_hu(a, b) }
+pub fn lasx_xvsubwod_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsubwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvsubwod_h_bu(a, b) }
+pub fn lasx_xvsubwod_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsubwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwod_q_d(a, b) }
+pub fn lasx_xvmulwod_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_d_w(a: v8i32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwod_d_w(a, b) }
+pub fn lasx_xvmulwod_d_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_w_h(a: v16i16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwod_w_h(a, b) }
+pub fn lasx_xvmulwod_w_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_h_b(a: v32i8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwod_h_b(a, b) }
+pub fn lasx_xvmulwod_h_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_q_du(a: v4u64, b: v4u64) -> v4i64 {
-    unsafe { __lasx_xvmulwod_q_du(a, b) }
+pub fn lasx_xvmulwod_q_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_d_wu(a: v8u32, b: v8u32) -> v4i64 {
-    unsafe { __lasx_xvmulwod_d_wu(a, b) }
+pub fn lasx_xvmulwod_d_wu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_w_hu(a: v16u16, b: v16u16) -> v8i32 {
-    unsafe { __lasx_xvmulwod_w_hu(a, b) }
+pub fn lasx_xvmulwod_w_hu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_h_bu(a: v32u8, b: v32u8) -> v16i16 {
-    unsafe { __lasx_xvmulwod_h_bu(a, b) }
+pub fn lasx_xvmulwod_h_bu(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwev_d_wu_w(a, b) }
+pub fn lasx_xvaddwev_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwev_w_hu_h(a, b) }
+pub fn lasx_xvaddwev_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwev_h_bu_b(a, b) }
+pub fn lasx_xvaddwev_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwev_d_wu_w(a, b) }
+pub fn lasx_xvmulwev_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwev_w_hu_h(a, b) }
+pub fn lasx_xvmulwev_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwev_h_bu_b(a, b) }
+pub fn lasx_xvmulwev_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvaddwod_d_wu_w(a, b) }
+pub fn lasx_xvaddwod_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvaddwod_w_hu_h(a, b) }
+pub fn lasx_xvaddwod_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvaddwod_h_bu_b(a, b) }
+pub fn lasx_xvaddwod_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_d_wu_w(a: v8u32, b: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmulwod_d_wu_w(a, b) }
+pub fn lasx_xvmulwod_d_wu_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_w_hu_h(a: v16u16, b: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmulwod_w_hu_h(a, b) }
+pub fn lasx_xvmulwod_w_hu_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_h_bu_b(a: v32u8, b: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmulwod_h_bu_b(a, b) }
+pub fn lasx_xvmulwod_h_bu_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvhaddw_q_d(a, b) }
+pub fn lasx_xvhaddw_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhaddw_qu_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvhaddw_qu_du(a, b) }
+pub fn lasx_xvhaddw_qu_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhaddw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_q_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvhsubw_q_d(a, b) }
+pub fn lasx_xvhsubw_q_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvhsubw_qu_du(a: v4u64, b: v4u64) -> v4u64 {
-    unsafe { __lasx_xvhsubw_qu_du(a, b) }
+pub fn lasx_xvhsubw_qu_du(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvhsubw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_q_d(a, b, c) }
+pub fn lasx_xvmaddwev_q_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_d_w(a, b, c) }
+pub fn lasx_xvmaddwev_d_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwev_w_h(a, b, c) }
+pub fn lasx_xvmaddwev_w_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwev_h_b(a, b, c) }
+pub fn lasx_xvmaddwev_h_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmaddwev_q_du(a, b, c) }
+pub fn lasx_xvmaddwev_q_du(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 {
-    unsafe { __lasx_xvmaddwev_d_wu(a, b, c) }
+pub fn lasx_xvmaddwev_d_wu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 {
-    unsafe { __lasx_xvmaddwev_w_hu(a, b, c) }
+pub fn lasx_xvmaddwev_w_hu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 {
-    unsafe { __lasx_xvmaddwev_h_bu(a, b, c) }
+pub fn lasx_xvmaddwev_h_bu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_q_d(a: v4i64, b: v4i64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_q_d(a, b, c) }
+pub fn lasx_xvmaddwod_q_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_d_w(a: v4i64, b: v8i32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_d_w(a, b, c) }
+pub fn lasx_xvmaddwod_d_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_w_h(a: v8i32, b: v16i16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwod_w_h(a, b, c) }
+pub fn lasx_xvmaddwod_w_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_h_b(a: v16i16, b: v32i8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwod_h_b(a, b, c) }
+pub fn lasx_xvmaddwod_h_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_q_du(a: v4u64, b: v4u64, c: v4u64) -> v4u64 {
-    unsafe { __lasx_xvmaddwod_q_du(a, b, c) }
+pub fn lasx_xvmaddwod_q_du(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_d_wu(a: v4u64, b: v8u32, c: v8u32) -> v4u64 {
-    unsafe { __lasx_xvmaddwod_d_wu(a, b, c) }
+pub fn lasx_xvmaddwod_d_wu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_w_hu(a: v8u32, b: v16u16, c: v16u16) -> v8u32 {
-    unsafe { __lasx_xvmaddwod_w_hu(a, b, c) }
+pub fn lasx_xvmaddwod_w_hu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_h_bu(a: v16u16, b: v32u8, c: v32u8) -> v16u16 {
-    unsafe { __lasx_xvmaddwod_h_bu(a, b, c) }
+pub fn lasx_xvmaddwod_h_bu(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_q_du_d(a, b, c) }
+pub fn lasx_xvmaddwev_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwev_d_wu_w(a, b, c) }
+pub fn lasx_xvmaddwev_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwev_w_hu_h(a, b, c) }
+pub fn lasx_xvmaddwev_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwev_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwev_h_bu_b(a, b, c) }
+pub fn lasx_xvmaddwev_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_q_du_d(a: v4i64, b: v4u64, c: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_q_du_d(a, b, c) }
+pub fn lasx_xvmaddwod_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_d_wu_w(a: v4i64, b: v8u32, c: v8i32) -> v4i64 {
-    unsafe { __lasx_xvmaddwod_d_wu_w(a, b, c) }
+pub fn lasx_xvmaddwod_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_w_hu_h(a: v8i32, b: v16u16, c: v16i16) -> v8i32 {
-    unsafe { __lasx_xvmaddwod_w_hu_h(a, b, c) }
+pub fn lasx_xvmaddwod_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmaddwod_h_bu_b(a: v16i16, b: v32u8, c: v32i8) -> v16i16 {
-    unsafe { __lasx_xvmaddwod_h_bu_b(a, b, c) }
+pub fn lasx_xvmaddwod_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_b(a: v32i8, b: v32i8) -> v32i8 {
-    unsafe { __lasx_xvrotr_b(a, b) }
+pub fn lasx_xvrotr_b(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_h(a: v16i16, b: v16i16) -> v16i16 {
-    unsafe { __lasx_xvrotr_h(a, b) }
+pub fn lasx_xvrotr_h(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_w(a: v8i32, b: v8i32) -> v8i32 {
-    unsafe { __lasx_xvrotr_w(a, b) }
+pub fn lasx_xvrotr_w(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotr_d(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvrotr_d(a, b) }
+pub fn lasx_xvrotr_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvrotr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvadd_q(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvadd_q(a, b) }
+pub fn lasx_xvadd_q(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvadd_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsub_q(a: v4i64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvsub_q(a, b) }
+pub fn lasx_xvsub_q(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvsub_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwev_q_du_d(a, b) }
+pub fn lasx_xvaddwev_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvaddwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvaddwod_q_du_d(a, b) }
+pub fn lasx_xvaddwod_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvaddwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwev_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwev_q_du_d(a, b) }
+pub fn lasx_xvmulwev_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmulwod_q_du_d(a: v4u64, b: v4i64) -> v4i64 {
-    unsafe { __lasx_xvmulwod_q_du_d(a, b) }
+pub fn lasx_xvmulwod_q_du_d(a: m256i, b: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmulwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmskgez_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmskgez_b(a) }
+pub fn lasx_xvmskgez_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmskgez_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvmsknz_b(a: v32i8) -> v32i8 {
-    unsafe { __lasx_xvmsknz_b(a) }
+pub fn lasx_xvmsknz_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvmsknz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_h_b(a: v32i8) -> v16i16 {
-    unsafe { __lasx_xvexth_h_b(a) }
+pub fn lasx_xvexth_h_b(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_h_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_w_h(a: v16i16) -> v8i32 {
-    unsafe { __lasx_xvexth_w_h(a) }
+pub fn lasx_xvexth_w_h(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_w_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_d_w(a: v8i32) -> v4i64 {
-    unsafe { __lasx_xvexth_d_w(a) }
+pub fn lasx_xvexth_d_w(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_q_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvexth_q_d(a) }
+pub fn lasx_xvexth_q_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_hu_bu(a: v32u8) -> v16u16 {
-    unsafe { __lasx_xvexth_hu_bu(a) }
+pub fn lasx_xvexth_hu_bu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_hu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_wu_hu(a: v16u16) -> v8u32 {
-    unsafe { __lasx_xvexth_wu_hu(a) }
+pub fn lasx_xvexth_wu_hu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_wu_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_du_wu(a: v8u32) -> v4u64 {
-    unsafe { __lasx_xvexth_du_wu(a) }
+pub fn lasx_xvexth_du_wu(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_du_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvexth_qu_du(a: v4u64) -> v4u64 {
-    unsafe { __lasx_xvexth_qu_du(a) }
+pub fn lasx_xvexth_qu_du(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvexth_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_b<const IMM3: u32>(a: v32i8) -> v32i8 {
+pub fn lasx_xvrotri_b<const IMM3: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvrotri_b(a, IMM3) }
+    unsafe { transmute(__lasx_xvrotri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_h<const IMM4: u32>(a: v16i16) -> v16i16 {
+pub fn lasx_xvrotri_h<const IMM4: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvrotri_h(a, IMM4) }
+    unsafe { transmute(__lasx_xvrotri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_w<const IMM5: u32>(a: v8i32) -> v8i32 {
+pub fn lasx_xvrotri_w<const IMM5: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvrotri_w(a, IMM5) }
+    unsafe { transmute(__lasx_xvrotri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrotri_d<const IMM6: u32>(a: v4i64) -> v4i64 {
+pub fn lasx_xvrotri_d<const IMM6: u32>(a: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvrotri_d(a, IMM6) }
+    unsafe { transmute(__lasx_xvrotri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvextl_q_d(a: v4i64) -> v4i64 {
-    unsafe { __lasx_xvextl_q_d(a) }
+pub fn lasx_xvextl_q_d(a: m256i) -> m256i {
+    unsafe { transmute(__lasx_xvextl_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrlni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrlni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrlni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrlni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrlrni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrlrni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrlrni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrlrni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrlrni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrlni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrlni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrlni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrlni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrlni_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrlni_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrlni_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlni_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrlni_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrlrni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrlrni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrlrni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrlrni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrlrni_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrlrni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrlrni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrlrni_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrlrni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrlrni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrlrni_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrlrni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrlrni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrlrni_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrlrni_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrlrni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrlrni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrani_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrani_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrani_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrani_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrani_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvsrarni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvsrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvsrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvsrarni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvsrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvsrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvsrarni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvsrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvsrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvsrarni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvsrarni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvsrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvsrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrani_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrani_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrani_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrani_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrani_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrani_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrani_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrani_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrani_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrani_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrani_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrani_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrani_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrani_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrani_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrani_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrani_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_b_h<const IMM4: u32>(a: v32i8, b: v32i8) -> v32i8 {
+pub fn lasx_xvssrarni_b_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_h_w<const IMM5: u32>(a: v16i16, b: v16i16) -> v16i16 {
+pub fn lasx_xvssrarni_h_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_w_d<const IMM6: u32>(a: v8i32, b: v8i32) -> v8i32 {
+pub fn lasx_xvssrarni_w_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_d_q<const IMM7: u32>(a: v4i64, b: v4i64) -> v4i64 {
+pub fn lasx_xvssrarni_d_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_bu_h<const IMM4: u32>(a: v32u8, b: v32i8) -> v32u8 {
+pub fn lasx_xvssrarni_bu_h<const IMM4: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lasx_xvssrarni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lasx_xvssrarni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_hu_w<const IMM5: u32>(a: v16u16, b: v16i16) -> v16u16 {
+pub fn lasx_xvssrarni_hu_w<const IMM5: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lasx_xvssrarni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lasx_xvssrarni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_wu_d<const IMM6: u32>(a: v8u32, b: v8i32) -> v8u32 {
+pub fn lasx_xvssrarni_wu_d<const IMM6: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lasx_xvssrarni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lasx_xvssrarni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvssrarni_du_q<const IMM7: u32>(a: v4u64, b: v4i64) -> v4u64 {
+pub fn lasx_xvssrarni_du_q<const IMM7: u32>(a: m256i, b: m256i) -> m256i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lasx_xvssrarni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lasx_xvssrarni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_b(a: v32u8) -> i32 {
-    unsafe { __lasx_xbnz_b(a) }
+pub fn lasx_xbnz_b(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_d(a: v4u64) -> i32 {
-    unsafe { __lasx_xbnz_d(a) }
+pub fn lasx_xbnz_d(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_h(a: v16u16) -> i32 {
-    unsafe { __lasx_xbnz_h(a) }
+pub fn lasx_xbnz_h(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_v(a: v32u8) -> i32 {
-    unsafe { __lasx_xbnz_v(a) }
+pub fn lasx_xbnz_v(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbnz_w(a: v8u32) -> i32 {
-    unsafe { __lasx_xbnz_w(a) }
+pub fn lasx_xbnz_w(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbnz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_b(a: v32u8) -> i32 {
-    unsafe { __lasx_xbz_b(a) }
+pub fn lasx_xbz_b(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_d(a: v4u64) -> i32 {
-    unsafe { __lasx_xbz_d(a) }
+pub fn lasx_xbz_d(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_h(a: v16u16) -> i32 {
-    unsafe { __lasx_xbz_h(a) }
+pub fn lasx_xbz_h(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_v(a: v32u8) -> i32 {
-    unsafe { __lasx_xbz_v(a) }
+pub fn lasx_xbz_v(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xbz_w(a: v8u32) -> i32 {
-    unsafe { __lasx_xbz_w(a) }
+pub fn lasx_xbz_w(a: m256i) -> i32 {
+    unsafe { transmute(__lasx_xbz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_caf_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_caf_d(a, b) }
+pub fn lasx_xvfcmp_caf_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_caf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_caf_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_caf_s(a, b) }
+pub fn lasx_xvfcmp_caf_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_caf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_ceq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_ceq_d(a, b) }
+pub fn lasx_xvfcmp_ceq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_ceq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_ceq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_ceq_s(a, b) }
+pub fn lasx_xvfcmp_ceq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_ceq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cle_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cle_d(a, b) }
+pub fn lasx_xvfcmp_cle_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cle_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cle_s(a, b) }
+pub fn lasx_xvfcmp_cle_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_clt_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_clt_d(a, b) }
+pub fn lasx_xvfcmp_clt_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_clt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_clt_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_clt_s(a, b) }
+pub fn lasx_xvfcmp_clt_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_clt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cne_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cne_d(a, b) }
+pub fn lasx_xvfcmp_cne_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cne_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cne_s(a, b) }
+pub fn lasx_xvfcmp_cne_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cor_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cor_d(a, b) }
+pub fn lasx_xvfcmp_cor_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cor_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cor_s(a, b) }
+pub fn lasx_xvfcmp_cor_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cueq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cueq_d(a, b) }
+pub fn lasx_xvfcmp_cueq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cueq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cueq_s(a, b) }
+pub fn lasx_xvfcmp_cueq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cule_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cule_d(a, b) }
+pub fn lasx_xvfcmp_cule_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cule_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cule_s(a, b) }
+pub fn lasx_xvfcmp_cule_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cult_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cult_d(a, b) }
+pub fn lasx_xvfcmp_cult_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cult_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cult_s(a, b) }
+pub fn lasx_xvfcmp_cult_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cun_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cun_d(a, b) }
+pub fn lasx_xvfcmp_cun_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cune_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_cune_d(a, b) }
+pub fn lasx_xvfcmp_cune_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cune_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cune_s(a, b) }
+pub fn lasx_xvfcmp_cune_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_cun_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_cun_s(a, b) }
+pub fn lasx_xvfcmp_cun_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_cun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_saf_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_saf_d(a, b) }
+pub fn lasx_xvfcmp_saf_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_saf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_saf_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_saf_s(a, b) }
+pub fn lasx_xvfcmp_saf_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_saf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_seq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_seq_d(a, b) }
+pub fn lasx_xvfcmp_seq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_seq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_seq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_seq_s(a, b) }
+pub fn lasx_xvfcmp_seq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_seq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sle_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sle_d(a, b) }
+pub fn lasx_xvfcmp_sle_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sle_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sle_s(a, b) }
+pub fn lasx_xvfcmp_sle_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_slt_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_slt_d(a, b) }
+pub fn lasx_xvfcmp_slt_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_slt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_slt_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_slt_s(a, b) }
+pub fn lasx_xvfcmp_slt_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_slt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sne_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sne_d(a, b) }
+pub fn lasx_xvfcmp_sne_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sne_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sne_s(a, b) }
+pub fn lasx_xvfcmp_sne_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sor_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sor_d(a, b) }
+pub fn lasx_xvfcmp_sor_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sor_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sor_s(a, b) }
+pub fn lasx_xvfcmp_sor_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sueq_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sueq_d(a, b) }
+pub fn lasx_xvfcmp_sueq_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sueq_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sueq_s(a, b) }
+pub fn lasx_xvfcmp_sueq_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sule_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sule_d(a, b) }
+pub fn lasx_xvfcmp_sule_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sule_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sule_s(a, b) }
+pub fn lasx_xvfcmp_sule_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sult_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sult_d(a, b) }
+pub fn lasx_xvfcmp_sult_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sult_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sult_s(a, b) }
+pub fn lasx_xvfcmp_sult_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sun_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sun_d(a, b) }
+pub fn lasx_xvfcmp_sun_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sune_d(a: v4f64, b: v4f64) -> v4i64 {
-    unsafe { __lasx_xvfcmp_sune_d(a, b) }
+pub fn lasx_xvfcmp_sune_d(a: m256d, b: m256d) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sune_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sune_s(a, b) }
+pub fn lasx_xvfcmp_sune_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvfcmp_sun_s(a: v8f32, b: v8f32) -> v8i32 {
-    unsafe { __lasx_xvfcmp_sun_s(a, b) }
+pub fn lasx_xvfcmp_sun_s(a: m256, b: m256) -> m256i {
+    unsafe { transmute(__lasx_xvfcmp_sun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_d_f<const IMM2: u32>(a: v4f64) -> v4f64 {
+pub fn lasx_xvpickve_d_f<const IMM2: u32>(a: m256d) -> m256d {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lasx_xvpickve_d_f(a, IMM2) }
+    unsafe { transmute(__lasx_xvpickve_d_f(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvpickve_w_f<const IMM3: u32>(a: v8f32) -> v8f32 {
+pub fn lasx_xvpickve_w_f<const IMM3: u32>(a: m256) -> m256 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lasx_xvpickve_w_f(a, IMM3) }
+    unsafe { transmute(__lasx_xvpickve_w_f(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_b<const IMM_S10: i32>() -> v32i8 {
+pub fn lasx_xvrepli_b<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_b(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_b(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_d<const IMM_S10: i32>() -> v4i64 {
+pub fn lasx_xvrepli_d<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_d(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_d(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_h<const IMM_S10: i32>() -> v16i16 {
+pub fn lasx_xvrepli_h<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_h(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_h(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lasx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lasx_xvrepli_w<const IMM_S10: i32>() -> v8i32 {
+pub fn lasx_xvrepli_w<const IMM_S10: i32>() -> m256i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lasx_xvrepli_w(IMM_S10) }
+    unsafe { transmute(__lasx_xvrepli_w(IMM_S10)) }
 }
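
Every wrapper above follows the same shape: the public `lasx_*` function now takes and returns the opaque `m256i`/`m256`/`m256d` types, while the underlying LLVM builtin keeps an element-typed signature, so the bits are reinterpreted with `transmute` at the call boundary. A minimal standalone sketch of that pattern follows; the names `M256i`, `V8i32` and `fake_xvadd_w` are illustrative stand-ins, not the real builtins or the `types!`-generated structs.

```rust
// Stand-alone sketch of the wrapper pattern used by the generated intrinsics above.
// `M256i`, `V8i32` and `fake_xvadd_w` are illustrative stand-ins, not stdarch items.
use std::mem::transmute;

#[derive(Clone, Copy)]
#[repr(C)]
struct M256i([i64; 4]); // opaque "bag of bits", like `m256i`

#[derive(Clone, Copy)]
#[repr(C)]
struct V8i32([i32; 8]); // element-typed view, like the internal `__v8i32`

// Stand-in for an LLVM builtin that works on the element-typed view.
fn fake_xvadd_w(a: V8i32, b: V8i32) -> V8i32 {
    let mut out = [0i32; 8];
    for i in 0..8 {
        out[i] = a.0[i].wrapping_add(b.0[i]);
    }
    V8i32(out)
}

// Public wrapper in the same shape as the generated `lasx_*` functions:
// opaque types in the signature, `transmute` at the call boundary.
fn lasx_like_xvadd_w(a: M256i, b: M256i) -> M256i {
    unsafe { transmute(fake_xvadd_w(transmute(a), transmute(b))) }
}

fn main() {
    let a = M256i(unsafe { transmute([1i32, 2, 3, 4, 5, 6, 7, 8]) });
    let b = M256i(unsafe { transmute([10i32; 8]) });
    let sum = lasx_like_xvadd_w(a, b);
    let lanes: [i32; 8] = unsafe { transmute(sum.0) };
    assert_eq!(lanes, [11, 12, 13, 14, 15, 16, 17, 18]);
}
```

Both types are 256 bits wide, so each `transmute` is a pure reinterpretation of the register contents, which is exactly how the generated code bridges the opaque public types and the element-typed LLVM intrinsics.
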
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs
index 9611517e637..a8ceede8739 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/types.rs
@@ -1,33 +1,140 @@
 types! {
     #![unstable(feature = "stdarch_loongarch", issue = "117427")]
 
-    /// LOONGARCH-specific 256-bit wide vector of 32 packed `i8`.
-    pub struct v32i8(32 x pub(crate) i8);
+    /// 256-bit wide integer vector type, LoongArch-specific
+    ///
+    /// This type is the same as the `__m256i` type defined in `lasxintrin.h`,
+    /// representing a 256-bit SIMD register. Usage of this type typically
+    /// occurs in conjunction with the `lasx` target feature for LoongArch.
+    ///
+    /// Internally this type may be viewed as:
+    ///
+    /// * `i8x32` - thirty two `i8` values packed together
+    /// * `i16x16` - sixteen `i16` values packed together
+    /// * `i32x8` - eight `i32` values packed together
+    /// * `i64x4` - four `i64` values packed together
+    ///
+    /// (as well as unsigned versions). Each intrinsic may interpret the
+    /// internal bits differently; check the documentation of the intrinsic
+    /// to see how it's being used.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Note that this means that an instance of `m256i` typically just means
+    /// a "bag of bits" which is left up to interpretation at the point of use.
+    ///
+    /// Most intrinsics using `m256i` are prefixed with `lasx_` and the integer
+    /// types tend to correspond to suffixes like "b", "h", "w" or "d".
+    pub struct m256i(4 x i64);
 
-    /// LOONGARCH-specific 256-bit wide vector of 16 packed `i16`.
-    pub struct v16i16(16 x pub(crate) i16);
+    /// 256-bit wide set of eight `f32` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m256` type defined in `lasxintrin.h`,
+    /// representing a 256-bit SIMD register which internally consists of
+    /// eight packed `f32` instances. Usage of this type typically occurs in
+    /// conjunction with the `lasx` target feature for LoongArch.
+    ///
+    /// Note that unlike `m256i`, the integer version of the 256-bit registers,
+    /// this `m256` type has *one* interpretation. Each instance of `m256`
+    /// always corresponds to `f32x8`, or eight `f32` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding between two consecutive elements); however, the
+    /// alignment is different and equal to the size of the type. Note that the
+    /// ABI for function calls may *not* be the same.
+    ///
+    /// Most intrinsics using `m256` are prefixed with `lasx_` and are
+    /// suffixed with "s".
+    pub struct m256(8 x f32);
 
-    /// LOONGARCH-specific 256-bit wide vector of 8 packed `i32`.
-    pub struct v8i32(8 x pub(crate) i32);
+    /// 256-bit wide set of four `f64` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m256d` type defined in `lasxintrin.h`,
+    /// representing a 256-bit SIMD register which internally consists of
+    /// four packed `f64` instances. Usage of this type typically occurs in
+    /// conjunction with the `lasx` target features for LoongArch.
+    ///
+    /// Note that unlike `m256i`, the integer version of the 256-bit registers,
+    /// this `m256d` type has *one* interpretation. Each instance of `m256d`
+    /// always corresponds to `f64x4`, or four `f64` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Most intrinsics using `m256d` are prefixed with `lasx_` and are suffixed
+    /// with "d". Not to be confused with the "d" suffix on `m256i` intrinsics,
+    /// which denotes 64-bit integer (double-word) elements.
+    pub struct m256d(4 x f64);
 
-    /// LOONGARCH-specific 256-bit wide vector of 4 packed `i64`.
-    pub struct v4i64(4 x pub(crate) i64);
-
-    /// LOONGARCH-specific 256-bit wide vector of 32 packed `u8`.
-    pub struct v32u8(32 x pub(crate) u8);
-
-    /// LOONGARCH-specific 256-bit wide vector of 16 packed `u16`.
-    pub struct v16u16(16 x pub(crate) u16);
-
-    /// LOONGARCH-specific 256-bit wide vector of 8 packed `u32`.
-    pub struct v8u32(8 x pub(crate) u32);
-
-    /// LOONGARCH-specific 256-bit wide vector of 4 packed `u64`.
-    pub struct v4u64(4 x pub(crate) u64);
+}
 
-    /// LOONGARCH-specific 128-bit wide vector of 8 packed `f32`.
-    pub struct v8f32(8 x pub(crate) f32);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v32i8([i8; 32]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16i16([i16; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8i32([i32; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4i64([i64; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v32u8([u8; 32]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16u16([u16; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8u32([u32; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4u64([u64; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8f32([f32; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4f64([f64; 4]);
 
-    /// LOONGARCH-specific 256-bit wide vector of 4 packed `f64`.
-    pub struct v4f64(4 x pub(crate) f64);
-}
+// These type aliases are provided solely for transitional compatibility.
+// They are temporary and will be removed when appropriate.
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v32i8 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16i16 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8i32 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4i64 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v32u8 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16u16 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8u32 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4u64 = m256i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8f32 = m256;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4f64 = m256d;
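
The transitional aliases keep the old `v*` spellings compiling, but they all resolve to one of the three opaque structs, so the per-element distinction disappears at the type level: under the new definitions, `v8i32` and `v4i64` are literally the same type. A small standalone sketch of that consequence, using a plain stand-in `m256i` rather than the real `types!`-generated struct:

```rust
// Sketch only: `m256i` here is a plain stand-in for the real opaque struct.
#[allow(non_camel_case_types)]
struct m256i([i64; 4]);

// Transitional aliases, mirroring the hunk above.
#[allow(non_camel_case_types)]
type v8i32 = m256i;
#[allow(non_camel_case_types)]
type v4i64 = m256i;

// Accepts the "8 x i32" alias...
fn lane0(v: v8i32) -> i64 {
    v.0[0]
}

fn main() {
    let x: v4i64 = m256i([7; 4]);
    // ...but a value named via the "4 x i64" alias is accepted too:
    // `v4i64` and `v8i32` are now the same nominal type.
    assert_eq!(lane0(x), 7);
}
```

This is why the aliases are marked as transitional: they preserve source compatibility while callers migrate to the opaque `m256i`/`m256`/`m256d` names, but they no longer carry any lane-layout information of their own.
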
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
index ba821a3e3dc..764e69ca054 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs
@@ -6,6874 +6,6875 @@
 // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- crates/stdarch-gen-loongarch/lsx.spec
 // ```
 
+use crate::mem::transmute;
 use super::types::*;
 
 #[allow(improper_ctypes)]
 unsafe extern "unadjusted" {
     #[link_name = "llvm.loongarch.lsx.vsll.b"]
-    fn __lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsll_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsll.h"]
-    fn __lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsll_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsll.w"]
-    fn __lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsll_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsll.d"]
-    fn __lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsll_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslli.b"]
-    fn __lsx_vslli_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vslli_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslli.h"]
-    fn __lsx_vslli_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vslli_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslli.w"]
-    fn __lsx_vslli_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vslli_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslli.d"]
-    fn __lsx_vslli_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vslli_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsra.b"]
-    fn __lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsra_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsra.h"]
-    fn __lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsra_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsra.w"]
-    fn __lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsra_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsra.d"]
-    fn __lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsra_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrai.b"]
-    fn __lsx_vsrai_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrai_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrai.h"]
-    fn __lsx_vsrai_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrai_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrai.w"]
-    fn __lsx_vsrai_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrai_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrai.d"]
-    fn __lsx_vsrai_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrai_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrar.b"]
-    fn __lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsrar_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrar.h"]
-    fn __lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsrar_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrar.w"]
-    fn __lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsrar_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrar.d"]
-    fn __lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsrar_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrari.b"]
-    fn __lsx_vsrari_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrari_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrari.h"]
-    fn __lsx_vsrari_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrari_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrari.w"]
-    fn __lsx_vsrari_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrari_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrari.d"]
-    fn __lsx_vsrari_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrari_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrl.b"]
-    fn __lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsrl_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrl.h"]
-    fn __lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsrl_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrl.w"]
-    fn __lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsrl_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrl.d"]
-    fn __lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsrl_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrli.b"]
-    fn __lsx_vsrli_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrli_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrli.h"]
-    fn __lsx_vsrli_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrli_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrli.w"]
-    fn __lsx_vsrli_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrli_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrli.d"]
-    fn __lsx_vsrli_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrli_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlr.b"]
-    fn __lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsrlr_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlr.h"]
-    fn __lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsrlr_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlr.w"]
-    fn __lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsrlr_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlr.d"]
-    fn __lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsrlr_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlri.b"]
-    fn __lsx_vsrlri_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsrlri_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlri.h"]
-    fn __lsx_vsrlri_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsrlri_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlri.w"]
-    fn __lsx_vsrlri_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsrlri_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlri.d"]
-    fn __lsx_vsrlri_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsrlri_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vbitclr.b"]
-    fn __lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vbitclr_b(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitclr.h"]
-    fn __lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vbitclr_h(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitclr.w"]
-    fn __lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vbitclr_w(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitclr.d"]
-    fn __lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vbitclr_d(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitclri.b"]
-    fn __lsx_vbitclri_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vbitclri_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitclri.h"]
-    fn __lsx_vbitclri_h(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vbitclri_h(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitclri.w"]
-    fn __lsx_vbitclri_w(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vbitclri_w(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitclri.d"]
-    fn __lsx_vbitclri_d(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vbitclri_d(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitset.b"]
-    fn __lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vbitset_b(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitset.h"]
-    fn __lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vbitset_h(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitset.w"]
-    fn __lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vbitset_w(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitset.d"]
-    fn __lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vbitset_d(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitseti.b"]
-    fn __lsx_vbitseti_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vbitseti_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitseti.h"]
-    fn __lsx_vbitseti_h(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vbitseti_h(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitseti.w"]
-    fn __lsx_vbitseti_w(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vbitseti_w(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitseti.d"]
-    fn __lsx_vbitseti_d(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vbitseti_d(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitrev.b"]
-    fn __lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vbitrev_b(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitrev.h"]
-    fn __lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vbitrev_h(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitrev.w"]
-    fn __lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vbitrev_w(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitrev.d"]
-    fn __lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vbitrev_d(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.b"]
-    fn __lsx_vbitrevi_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vbitrevi_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.h"]
-    fn __lsx_vbitrevi_h(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vbitrevi_h(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.w"]
-    fn __lsx_vbitrevi_w(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vbitrevi_w(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vbitrevi.d"]
-    fn __lsx_vbitrevi_d(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vbitrevi_d(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vadd.b"]
-    fn __lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vadd_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vadd.h"]
-    fn __lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vadd_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vadd.w"]
-    fn __lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vadd_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vadd.d"]
-    fn __lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vadd_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddi.bu"]
-    fn __lsx_vaddi_bu(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vaddi_bu(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vaddi.hu"]
-    fn __lsx_vaddi_hu(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vaddi_hu(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddi.wu"]
-    fn __lsx_vaddi_wu(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vaddi_wu(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddi.du"]
-    fn __lsx_vaddi_du(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vaddi_du(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsub.b"]
-    fn __lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsub_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsub.h"]
-    fn __lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsub_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsub.w"]
-    fn __lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsub_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsub.d"]
-    fn __lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsub_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubi.bu"]
-    fn __lsx_vsubi_bu(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsubi_bu(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsubi.hu"]
-    fn __lsx_vsubi_hu(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsubi_hu(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubi.wu"]
-    fn __lsx_vsubi_wu(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsubi_wu(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubi.du"]
-    fn __lsx_vsubi_du(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsubi_du(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmax.b"]
-    fn __lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmax_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmax.h"]
-    fn __lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmax_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmax.w"]
-    fn __lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmax_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmax.d"]
-    fn __lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmax_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaxi.b"]
-    fn __lsx_vmaxi_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vmaxi_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmaxi.h"]
-    fn __lsx_vmaxi_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vmaxi_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaxi.w"]
-    fn __lsx_vmaxi_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vmaxi_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaxi.d"]
-    fn __lsx_vmaxi_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vmaxi_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmax.bu"]
-    fn __lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmax_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmax.hu"]
-    fn __lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmax_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmax.wu"]
-    fn __lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmax_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmax.du"]
-    fn __lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmax_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaxi.bu"]
-    fn __lsx_vmaxi_bu(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vmaxi_bu(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmaxi.hu"]
-    fn __lsx_vmaxi_hu(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vmaxi_hu(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmaxi.wu"]
-    fn __lsx_vmaxi_wu(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vmaxi_wu(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmaxi.du"]
-    fn __lsx_vmaxi_du(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vmaxi_du(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmin.b"]
-    fn __lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmin_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmin.h"]
-    fn __lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmin_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmin.w"]
-    fn __lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmin_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmin.d"]
-    fn __lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmin_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmini.b"]
-    fn __lsx_vmini_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vmini_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmini.h"]
-    fn __lsx_vmini_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vmini_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmini.w"]
-    fn __lsx_vmini_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vmini_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmini.d"]
-    fn __lsx_vmini_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vmini_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmin.bu"]
-    fn __lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmin_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmin.hu"]
-    fn __lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmin_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmin.wu"]
-    fn __lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmin_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmin.du"]
-    fn __lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmin_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmini.bu"]
-    fn __lsx_vmini_bu(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vmini_bu(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmini.hu"]
-    fn __lsx_vmini_hu(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vmini_hu(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmini.wu"]
-    fn __lsx_vmini_wu(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vmini_wu(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmini.du"]
-    fn __lsx_vmini_du(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vmini_du(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vseq.b"]
-    fn __lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vseq_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vseq.h"]
-    fn __lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vseq_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vseq.w"]
-    fn __lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vseq_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vseq.d"]
-    fn __lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vseq_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vseqi.b"]
-    fn __lsx_vseqi_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vseqi_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vseqi.h"]
-    fn __lsx_vseqi_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vseqi_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vseqi.w"]
-    fn __lsx_vseqi_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vseqi_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vseqi.d"]
-    fn __lsx_vseqi_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vseqi_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslti.b"]
-    fn __lsx_vslti_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vslti_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslt.b"]
-    fn __lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vslt_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslt.h"]
-    fn __lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vslt_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslt.w"]
-    fn __lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vslt_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslt.d"]
-    fn __lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vslt_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslti.h"]
-    fn __lsx_vslti_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vslti_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslti.w"]
-    fn __lsx_vslti_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vslti_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslti.d"]
-    fn __lsx_vslti_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vslti_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslt.bu"]
-    fn __lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8;
+    fn __lsx_vslt_bu(a: __v16u8, b: __v16u8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslt.hu"]
-    fn __lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16;
+    fn __lsx_vslt_hu(a: __v8u16, b: __v8u16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslt.wu"]
-    fn __lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32;
+    fn __lsx_vslt_wu(a: __v4u32, b: __v4u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslt.du"]
-    fn __lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vslt_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslti.bu"]
-    fn __lsx_vslti_bu(a: v16u8, b: u32) -> v16i8;
+    fn __lsx_vslti_bu(a: __v16u8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslti.hu"]
-    fn __lsx_vslti_hu(a: v8u16, b: u32) -> v8i16;
+    fn __lsx_vslti_hu(a: __v8u16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslti.wu"]
-    fn __lsx_vslti_wu(a: v4u32, b: u32) -> v4i32;
+    fn __lsx_vslti_wu(a: __v4u32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslti.du"]
-    fn __lsx_vslti_du(a: v2u64, b: u32) -> v2i64;
+    fn __lsx_vslti_du(a: __v2u64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsle.b"]
-    fn __lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsle_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsle.h"]
-    fn __lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsle_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsle.w"]
-    fn __lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsle_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsle.d"]
-    fn __lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsle_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslei.b"]
-    fn __lsx_vslei_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vslei_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslei.h"]
-    fn __lsx_vslei_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vslei_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslei.w"]
-    fn __lsx_vslei_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vslei_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslei.d"]
-    fn __lsx_vslei_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vslei_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsle.bu"]
-    fn __lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8;
+    fn __lsx_vsle_bu(a: __v16u8, b: __v16u8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsle.hu"]
-    fn __lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16;
+    fn __lsx_vsle_hu(a: __v8u16, b: __v8u16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsle.wu"]
-    fn __lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32;
+    fn __lsx_vsle_wu(a: __v4u32, b: __v4u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsle.du"]
-    fn __lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vsle_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vslei.bu"]
-    fn __lsx_vslei_bu(a: v16u8, b: u32) -> v16i8;
+    fn __lsx_vslei_bu(a: __v16u8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vslei.hu"]
-    fn __lsx_vslei_hu(a: v8u16, b: u32) -> v8i16;
+    fn __lsx_vslei_hu(a: __v8u16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vslei.wu"]
-    fn __lsx_vslei_wu(a: v4u32, b: u32) -> v4i32;
+    fn __lsx_vslei_wu(a: __v4u32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vslei.du"]
-    fn __lsx_vslei_du(a: v2u64, b: u32) -> v2i64;
+    fn __lsx_vslei_du(a: __v2u64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsat.b"]
-    fn __lsx_vsat_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vsat_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsat.h"]
-    fn __lsx_vsat_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vsat_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsat.w"]
-    fn __lsx_vsat_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vsat_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsat.d"]
-    fn __lsx_vsat_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vsat_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsat.bu"]
-    fn __lsx_vsat_bu(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vsat_bu(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vsat.hu"]
-    fn __lsx_vsat_hu(a: v8u16, b: u32) -> v8u16;
+    fn __lsx_vsat_hu(a: __v8u16, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vsat.wu"]
-    fn __lsx_vsat_wu(a: v4u32, b: u32) -> v4u32;
+    fn __lsx_vsat_wu(a: __v4u32, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsat.du"]
-    fn __lsx_vsat_du(a: v2u64, b: u32) -> v2u64;
+    fn __lsx_vsat_du(a: __v2u64, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vadda.b"]
-    fn __lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vadda_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vadda.h"]
-    fn __lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vadda_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vadda.w"]
-    fn __lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vadda_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vadda.d"]
-    fn __lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vadda_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsadd.b"]
-    fn __lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsadd_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsadd.h"]
-    fn __lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsadd_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsadd.w"]
-    fn __lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsadd_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsadd.d"]
-    fn __lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsadd_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsadd.bu"]
-    fn __lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vsadd_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vsadd.hu"]
-    fn __lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vsadd_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vsadd.wu"]
-    fn __lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vsadd_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsadd.du"]
-    fn __lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vsadd_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vavg.b"]
-    fn __lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vavg_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vavg.h"]
-    fn __lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vavg_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vavg.w"]
-    fn __lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vavg_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vavg.d"]
-    fn __lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vavg_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vavg.bu"]
-    fn __lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vavg_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vavg.hu"]
-    fn __lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vavg_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vavg.wu"]
-    fn __lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vavg_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vavg.du"]
-    fn __lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vavg_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vavgr.b"]
-    fn __lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vavgr_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vavgr.h"]
-    fn __lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vavgr_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vavgr.w"]
-    fn __lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vavgr_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vavgr.d"]
-    fn __lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vavgr_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vavgr.bu"]
-    fn __lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vavgr_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vavgr.hu"]
-    fn __lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vavgr_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vavgr.wu"]
-    fn __lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vavgr_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vavgr.du"]
-    fn __lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vavgr_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vssub.b"]
-    fn __lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vssub_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssub.h"]
-    fn __lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vssub_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssub.w"]
-    fn __lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vssub_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssub.d"]
-    fn __lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vssub_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssub.bu"]
-    fn __lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vssub_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssub.hu"]
-    fn __lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vssub_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssub.wu"]
-    fn __lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vssub_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssub.du"]
-    fn __lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vssub_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vabsd.b"]
-    fn __lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vabsd_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vabsd.h"]
-    fn __lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vabsd_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vabsd.w"]
-    fn __lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vabsd_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vabsd.d"]
-    fn __lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vabsd_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vabsd.bu"]
-    fn __lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vabsd_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vabsd.hu"]
-    fn __lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vabsd_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vabsd.wu"]
-    fn __lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vabsd_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vabsd.du"]
-    fn __lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vabsd_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmul.b"]
-    fn __lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmul_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmul.h"]
-    fn __lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmul_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmul.w"]
-    fn __lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmul_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmul.d"]
-    fn __lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmul_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmadd.b"]
-    fn __lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vmadd_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmadd.h"]
-    fn __lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vmadd_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmadd.w"]
-    fn __lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    fn __lsx_vmadd_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmadd.d"]
-    fn __lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmadd_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmsub.b"]
-    fn __lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vmsub_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmsub.h"]
-    fn __lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vmsub_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmsub.w"]
-    fn __lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    fn __lsx_vmsub_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmsub.d"]
-    fn __lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmsub_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vdiv.b"]
-    fn __lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vdiv_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vdiv.h"]
-    fn __lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vdiv_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vdiv.w"]
-    fn __lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vdiv_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vdiv.d"]
-    fn __lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vdiv_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vdiv.bu"]
-    fn __lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vdiv_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vdiv.hu"]
-    fn __lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vdiv_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vdiv.wu"]
-    fn __lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vdiv_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vdiv.du"]
-    fn __lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vdiv_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.h.b"]
-    fn __lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vhaddw_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vhaddw.w.h"]
-    fn __lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vhaddw_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vhaddw.d.w"]
-    fn __lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vhaddw_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.hu.bu"]
-    fn __lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16;
+    fn __lsx_vhaddw_hu_bu(a: __v16u8, b: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vhaddw.wu.hu"]
-    fn __lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32;
+    fn __lsx_vhaddw_wu_hu(a: __v8u16, b: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vhaddw.du.wu"]
-    fn __lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64;
+    fn __lsx_vhaddw_du_wu(a: __v4u32, b: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.h.b"]
-    fn __lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vhsubw_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vhsubw.w.h"]
-    fn __lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vhsubw_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vhsubw.d.w"]
-    fn __lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vhsubw_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.hu.bu"]
-    fn __lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vhsubw_hu_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vhsubw.wu.hu"]
-    fn __lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vhsubw_wu_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vhsubw.du.wu"]
-    fn __lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vhsubw_du_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmod.b"]
-    fn __lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmod_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmod.h"]
-    fn __lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmod_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmod.w"]
-    fn __lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmod_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmod.d"]
-    fn __lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmod_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmod.bu"]
-    fn __lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmod_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmod.hu"]
-    fn __lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmod_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmod.wu"]
-    fn __lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmod_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmod.du"]
-    fn __lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmod_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vreplve.b"]
-    fn __lsx_vreplve_b(a: v16i8, b: i32) -> v16i8;
+    fn __lsx_vreplve_b(a: __v16i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vreplve.h"]
-    fn __lsx_vreplve_h(a: v8i16, b: i32) -> v8i16;
+    fn __lsx_vreplve_h(a: __v8i16, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vreplve.w"]
-    fn __lsx_vreplve_w(a: v4i32, b: i32) -> v4i32;
+    fn __lsx_vreplve_w(a: __v4i32, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplve.d"]
-    fn __lsx_vreplve_d(a: v2i64, b: i32) -> v2i64;
+    fn __lsx_vreplve_d(a: __v2i64, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vreplvei.b"]
-    fn __lsx_vreplvei_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vreplvei_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vreplvei.h"]
-    fn __lsx_vreplvei_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vreplvei_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vreplvei.w"]
-    fn __lsx_vreplvei_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vreplvei_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplvei.d"]
-    fn __lsx_vreplvei_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vreplvei_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpickev.b"]
-    fn __lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpickev_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpickev.h"]
-    fn __lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpickev_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpickev.w"]
-    fn __lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpickev_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpickev.d"]
-    fn __lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpickev_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpickod.b"]
-    fn __lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpickod_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpickod.h"]
-    fn __lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpickod_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpickod.w"]
-    fn __lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpickod_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpickod.d"]
-    fn __lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpickod_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vilvh.b"]
-    fn __lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vilvh_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vilvh.h"]
-    fn __lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vilvh_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vilvh.w"]
-    fn __lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vilvh_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vilvh.d"]
-    fn __lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vilvh_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vilvl.b"]
-    fn __lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vilvl_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vilvl.h"]
-    fn __lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vilvl_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vilvl.w"]
-    fn __lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vilvl_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vilvl.d"]
-    fn __lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vilvl_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpackev.b"]
-    fn __lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpackev_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpackev.h"]
-    fn __lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpackev_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpackev.w"]
-    fn __lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpackev_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpackev.d"]
-    fn __lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpackev_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpackod.b"]
-    fn __lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vpackod_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpackod.h"]
-    fn __lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vpackod_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpackod.w"]
-    fn __lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vpackod_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpackod.d"]
-    fn __lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vpackod_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vshuf.h"]
-    fn __lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vshuf_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vshuf.w"]
-    fn __lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32;
+    fn __lsx_vshuf_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vshuf.d"]
-    fn __lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vshuf_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vand.v"]
-    fn __lsx_vand_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vand_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vandi.b"]
-    fn __lsx_vandi_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vandi_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vor.v"]
-    fn __lsx_vor_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vor_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vori.b"]
-    fn __lsx_vori_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vori_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vnor.v"]
-    fn __lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vnor_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vnori.b"]
-    fn __lsx_vnori_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vnori_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vxor.v"]
-    fn __lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vxor_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vxori.b"]
-    fn __lsx_vxori_b(a: v16u8, b: u32) -> v16u8;
+    fn __lsx_vxori_b(a: __v16u8, b: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitsel.v"]
-    fn __lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8;
+    fn __lsx_vbitsel_v(a: __v16u8, b: __v16u8, c: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vbitseli.b"]
-    fn __lsx_vbitseli_b(a: v16u8, b: v16u8, c: u32) -> v16u8;
+    fn __lsx_vbitseli_b(a: __v16u8, b: __v16u8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.b"]
-    fn __lsx_vshuf4i_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vshuf4i_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.h"]
-    fn __lsx_vshuf4i_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vshuf4i_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.w"]
-    fn __lsx_vshuf4i_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vshuf4i_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.b"]
-    fn __lsx_vreplgr2vr_b(a: i32) -> v16i8;
+    fn __lsx_vreplgr2vr_b(a: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.h"]
-    fn __lsx_vreplgr2vr_h(a: i32) -> v8i16;
+    fn __lsx_vreplgr2vr_h(a: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.w"]
-    fn __lsx_vreplgr2vr_w(a: i32) -> v4i32;
+    fn __lsx_vreplgr2vr_w(a: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vreplgr2vr.d"]
-    fn __lsx_vreplgr2vr_d(a: i64) -> v2i64;
+    fn __lsx_vreplgr2vr_d(a: i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpcnt.b"]
-    fn __lsx_vpcnt_b(a: v16i8) -> v16i8;
+    fn __lsx_vpcnt_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vpcnt.h"]
-    fn __lsx_vpcnt_h(a: v8i16) -> v8i16;
+    fn __lsx_vpcnt_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vpcnt.w"]
-    fn __lsx_vpcnt_w(a: v4i32) -> v4i32;
+    fn __lsx_vpcnt_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vpcnt.d"]
-    fn __lsx_vpcnt_d(a: v2i64) -> v2i64;
+    fn __lsx_vpcnt_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vclo.b"]
-    fn __lsx_vclo_b(a: v16i8) -> v16i8;
+    fn __lsx_vclo_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vclo.h"]
-    fn __lsx_vclo_h(a: v8i16) -> v8i16;
+    fn __lsx_vclo_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vclo.w"]
-    fn __lsx_vclo_w(a: v4i32) -> v4i32;
+    fn __lsx_vclo_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vclo.d"]
-    fn __lsx_vclo_d(a: v2i64) -> v2i64;
+    fn __lsx_vclo_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vclz.b"]
-    fn __lsx_vclz_b(a: v16i8) -> v16i8;
+    fn __lsx_vclz_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vclz.h"]
-    fn __lsx_vclz_h(a: v8i16) -> v8i16;
+    fn __lsx_vclz_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vclz.w"]
-    fn __lsx_vclz_w(a: v4i32) -> v4i32;
+    fn __lsx_vclz_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vclz.d"]
-    fn __lsx_vclz_d(a: v2i64) -> v2i64;
+    fn __lsx_vclz_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.b"]
-    fn __lsx_vpickve2gr_b(a: v16i8, b: u32) -> i32;
+    fn __lsx_vpickve2gr_b(a: __v16i8, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.h"]
-    fn __lsx_vpickve2gr_h(a: v8i16, b: u32) -> i32;
+    fn __lsx_vpickve2gr_h(a: __v8i16, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.w"]
-    fn __lsx_vpickve2gr_w(a: v4i32, b: u32) -> i32;
+    fn __lsx_vpickve2gr_w(a: __v4i32, b: u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.d"]
-    fn __lsx_vpickve2gr_d(a: v2i64, b: u32) -> i64;
+    fn __lsx_vpickve2gr_d(a: __v2i64, b: u32) -> i64;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.bu"]
-    fn __lsx_vpickve2gr_bu(a: v16i8, b: u32) -> u32;
+    fn __lsx_vpickve2gr_bu(a: __v16i8, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.hu"]
-    fn __lsx_vpickve2gr_hu(a: v8i16, b: u32) -> u32;
+    fn __lsx_vpickve2gr_hu(a: __v8i16, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.wu"]
-    fn __lsx_vpickve2gr_wu(a: v4i32, b: u32) -> u32;
+    fn __lsx_vpickve2gr_wu(a: __v4i32, b: u32) -> u32;
     #[link_name = "llvm.loongarch.lsx.vpickve2gr.du"]
-    fn __lsx_vpickve2gr_du(a: v2i64, b: u32) -> u64;
+    fn __lsx_vpickve2gr_du(a: __v2i64, b: u32) -> u64;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.b"]
-    fn __lsx_vinsgr2vr_b(a: v16i8, b: i32, c: u32) -> v16i8;
+    fn __lsx_vinsgr2vr_b(a: __v16i8, b: i32, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.h"]
-    fn __lsx_vinsgr2vr_h(a: v8i16, b: i32, c: u32) -> v8i16;
+    fn __lsx_vinsgr2vr_h(a: __v8i16, b: i32, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.w"]
-    fn __lsx_vinsgr2vr_w(a: v4i32, b: i32, c: u32) -> v4i32;
+    fn __lsx_vinsgr2vr_w(a: __v4i32, b: i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vinsgr2vr.d"]
-    fn __lsx_vinsgr2vr_d(a: v2i64, b: i64, c: u32) -> v2i64;
+    fn __lsx_vinsgr2vr_d(a: __v2i64, b: i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfadd.s"]
-    fn __lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfadd_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfadd.d"]
-    fn __lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfadd_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfsub.s"]
-    fn __lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfsub_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfsub.d"]
-    fn __lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfsub_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmul.s"]
-    fn __lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmul_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmul.d"]
-    fn __lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmul_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfdiv.s"]
-    fn __lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfdiv_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfdiv.d"]
-    fn __lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfdiv_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfcvt.h.s"]
-    fn __lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16;
+    fn __lsx_vfcvt_h_s(a: __v4f32, b: __v4f32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vfcvt.s.d"]
-    fn __lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32;
+    fn __lsx_vfcvt_s_d(a: __v2f64, b: __v2f64) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmin.s"]
-    fn __lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmin_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmin.d"]
-    fn __lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmin_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmina.s"]
-    fn __lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmina_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmina.d"]
-    fn __lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmina_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmax.s"]
-    fn __lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmax_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmax.d"]
-    fn __lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmax_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmaxa.s"]
-    fn __lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32;
+    fn __lsx_vfmaxa_s(a: __v4f32, b: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmaxa.d"]
-    fn __lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64;
+    fn __lsx_vfmaxa_d(a: __v2f64, b: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfclass.s"]
-    fn __lsx_vfclass_s(a: v4f32) -> v4i32;
+    fn __lsx_vfclass_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfclass.d"]
-    fn __lsx_vfclass_d(a: v2f64) -> v2i64;
+    fn __lsx_vfclass_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfsqrt.s"]
-    fn __lsx_vfsqrt_s(a: v4f32) -> v4f32;
+    fn __lsx_vfsqrt_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfsqrt.d"]
-    fn __lsx_vfsqrt_d(a: v2f64) -> v2f64;
+    fn __lsx_vfsqrt_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrecip.s"]
-    fn __lsx_vfrecip_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrecip_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrecip.d"]
-    fn __lsx_vfrecip_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrecip_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrecipe.s"]
-    fn __lsx_vfrecipe_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrecipe_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrecipe.d"]
-    fn __lsx_vfrecipe_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrecipe_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrsqrte.s"]
-    fn __lsx_vfrsqrte_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrsqrte_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrsqrte.d"]
-    fn __lsx_vfrsqrte_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrsqrte_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrint.s"]
-    fn __lsx_vfrint_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrint_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrint.d"]
-    fn __lsx_vfrint_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrint_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrsqrt.s"]
-    fn __lsx_vfrsqrt_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrsqrt_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrsqrt.d"]
-    fn __lsx_vfrsqrt_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrsqrt_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vflogb.s"]
-    fn __lsx_vflogb_s(a: v4f32) -> v4f32;
+    fn __lsx_vflogb_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vflogb.d"]
-    fn __lsx_vflogb_d(a: v2f64) -> v2f64;
+    fn __lsx_vflogb_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfcvth.s.h"]
-    fn __lsx_vfcvth_s_h(a: v8i16) -> v4f32;
+    fn __lsx_vfcvth_s_h(a: __v8i16) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfcvth.d.s"]
-    fn __lsx_vfcvth_d_s(a: v4f32) -> v2f64;
+    fn __lsx_vfcvth_d_s(a: __v4f32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfcvtl.s.h"]
-    fn __lsx_vfcvtl_s_h(a: v8i16) -> v4f32;
+    fn __lsx_vfcvtl_s_h(a: __v8i16) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfcvtl.d.s"]
-    fn __lsx_vfcvtl_d_s(a: v4f32) -> v2f64;
+    fn __lsx_vfcvtl_d_s(a: __v4f32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vftint.w.s"]
-    fn __lsx_vftint_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftint_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftint.l.d"]
-    fn __lsx_vftint_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftint_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftint.wu.s"]
-    fn __lsx_vftint_wu_s(a: v4f32) -> v4u32;
+    fn __lsx_vftint_wu_s(a: __v4f32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vftint.lu.d"]
-    fn __lsx_vftint_lu_d(a: v2f64) -> v2u64;
+    fn __lsx_vftint_lu_d(a: __v2f64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vftintrz.w.s"]
-    fn __lsx_vftintrz_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrz_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrz.l.d"]
-    fn __lsx_vftintrz_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrz_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrz.wu.s"]
-    fn __lsx_vftintrz_wu_s(a: v4f32) -> v4u32;
+    fn __lsx_vftintrz_wu_s(a: __v4f32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vftintrz.lu.d"]
-    fn __lsx_vftintrz_lu_d(a: v2f64) -> v2u64;
+    fn __lsx_vftintrz_lu_d(a: __v2f64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vffint.s.w"]
-    fn __lsx_vffint_s_w(a: v4i32) -> v4f32;
+    fn __lsx_vffint_s_w(a: __v4i32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vffint.d.l"]
-    fn __lsx_vffint_d_l(a: v2i64) -> v2f64;
+    fn __lsx_vffint_d_l(a: __v2i64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vffint.s.wu"]
-    fn __lsx_vffint_s_wu(a: v4u32) -> v4f32;
+    fn __lsx_vffint_s_wu(a: __v4u32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vffint.d.lu"]
-    fn __lsx_vffint_d_lu(a: v2u64) -> v2f64;
+    fn __lsx_vffint_d_lu(a: __v2u64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vandn.v"]
-    fn __lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vandn_v(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vneg.b"]
-    fn __lsx_vneg_b(a: v16i8) -> v16i8;
+    fn __lsx_vneg_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vneg.h"]
-    fn __lsx_vneg_h(a: v8i16) -> v8i16;
+    fn __lsx_vneg_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vneg.w"]
-    fn __lsx_vneg_w(a: v4i32) -> v4i32;
+    fn __lsx_vneg_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vneg.d"]
-    fn __lsx_vneg_d(a: v2i64) -> v2i64;
+    fn __lsx_vneg_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmuh.b"]
-    fn __lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vmuh_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmuh.h"]
-    fn __lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vmuh_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmuh.w"]
-    fn __lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vmuh_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmuh.d"]
-    fn __lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmuh_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmuh.bu"]
-    fn __lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8;
+    fn __lsx_vmuh_bu(a: __v16u8, b: __v16u8) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vmuh.hu"]
-    fn __lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16;
+    fn __lsx_vmuh_hu(a: __v8u16, b: __v8u16) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmuh.wu"]
-    fn __lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32;
+    fn __lsx_vmuh_wu(a: __v4u32, b: __v4u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmuh.du"]
-    fn __lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vmuh_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vsllwil.h.b"]
-    fn __lsx_vsllwil_h_b(a: v16i8, b: u32) -> v8i16;
+    fn __lsx_vsllwil_h_b(a: __v16i8, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsllwil.w.h"]
-    fn __lsx_vsllwil_w_h(a: v8i16, b: u32) -> v4i32;
+    fn __lsx_vsllwil_w_h(a: __v8i16, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsllwil.d.w"]
-    fn __lsx_vsllwil_d_w(a: v4i32, b: u32) -> v2i64;
+    fn __lsx_vsllwil_d_w(a: __v4i32, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsllwil.hu.bu"]
-    fn __lsx_vsllwil_hu_bu(a: v16u8, b: u32) -> v8u16;
+    fn __lsx_vsllwil_hu_bu(a: __v16u8, b: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vsllwil.wu.hu"]
-    fn __lsx_vsllwil_wu_hu(a: v8u16, b: u32) -> v4u32;
+    fn __lsx_vsllwil_wu_hu(a: __v8u16, b: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsllwil.du.wu"]
-    fn __lsx_vsllwil_du_wu(a: v4u32, b: u32) -> v2u64;
+    fn __lsx_vsllwil_du_wu(a: __v4u32, b: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vsran.b.h"]
-    fn __lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsran_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsran.h.w"]
-    fn __lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsran_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsran.w.d"]
-    fn __lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsran_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssran.b.h"]
-    fn __lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssran_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssran.h.w"]
-    fn __lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssran_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssran.w.d"]
-    fn __lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssran_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssran.bu.h"]
-    fn __lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssran_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssran.hu.w"]
-    fn __lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssran_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssran.wu.d"]
-    fn __lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssran_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsrarn.b.h"]
-    fn __lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsrarn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrarn.h.w"]
-    fn __lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsrarn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrarn.w.d"]
-    fn __lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsrarn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrarn.b.h"]
-    fn __lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssrarn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrarn.h.w"]
-    fn __lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssrarn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrarn.w.d"]
-    fn __lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssrarn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrarn.bu.h"]
-    fn __lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssrarn_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrarn.hu.w"]
-    fn __lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssrarn_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrarn.wu.d"]
-    fn __lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssrarn_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsrln.b.h"]
-    fn __lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsrln_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrln.h.w"]
-    fn __lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsrln_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrln.w.d"]
-    fn __lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsrln_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrln.bu.h"]
-    fn __lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssrln_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrln.hu.w"]
-    fn __lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssrln_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrln.wu.d"]
-    fn __lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssrln_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vsrlrn.b.h"]
-    fn __lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vsrlrn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlrn.h.w"]
-    fn __lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vsrlrn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlrn.w.d"]
-    fn __lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vsrlrn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.bu.h"]
-    fn __lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8;
+    fn __lsx_vssrlrn_bu_h(a: __v8u16, b: __v8u16) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.hu.w"]
-    fn __lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16;
+    fn __lsx_vssrlrn_hu_w(a: __v4u32, b: __v4u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.wu.d"]
-    fn __lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32;
+    fn __lsx_vssrlrn_wu_d(a: __v2u64, b: __v2u64) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vfrstpi.b"]
-    fn __lsx_vfrstpi_b(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vfrstpi_b(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vfrstpi.h"]
-    fn __lsx_vfrstpi_h(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vfrstpi_h(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vfrstp.b"]
-    fn __lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vfrstp_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vfrstp.h"]
-    fn __lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16;
+    fn __lsx_vfrstp_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vshuf4i.d"]
-    fn __lsx_vshuf4i_d(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vshuf4i_d(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vbsrl.v"]
-    fn __lsx_vbsrl_v(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vbsrl_v(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vbsll.v"]
-    fn __lsx_vbsll_v(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vbsll_v(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vextrins.b"]
-    fn __lsx_vextrins_b(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vextrins_b(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vextrins.h"]
-    fn __lsx_vextrins_h(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vextrins_h(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vextrins.w"]
-    fn __lsx_vextrins_w(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vextrins_w(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vextrins.d"]
-    fn __lsx_vextrins_d(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vextrins_d(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmskltz.b"]
-    fn __lsx_vmskltz_b(a: v16i8) -> v16i8;
+    fn __lsx_vmskltz_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmskltz.h"]
-    fn __lsx_vmskltz_h(a: v8i16) -> v8i16;
+    fn __lsx_vmskltz_h(a: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmskltz.w"]
-    fn __lsx_vmskltz_w(a: v4i32) -> v4i32;
+    fn __lsx_vmskltz_w(a: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmskltz.d"]
-    fn __lsx_vmskltz_d(a: v2i64) -> v2i64;
+    fn __lsx_vmskltz_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsigncov.b"]
-    fn __lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vsigncov_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsigncov.h"]
-    fn __lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vsigncov_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsigncov.w"]
-    fn __lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vsigncov_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsigncov.d"]
-    fn __lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsigncov_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfmadd.s"]
-    fn __lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfmadd_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmadd.d"]
-    fn __lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfmadd_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfmsub.s"]
-    fn __lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfmsub_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfmsub.d"]
-    fn __lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfmsub_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfnmadd.s"]
-    fn __lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfnmadd_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfnmadd.d"]
-    fn __lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfnmadd_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfnmsub.s"]
-    fn __lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32;
+    fn __lsx_vfnmsub_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfnmsub.d"]
-    fn __lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64;
+    fn __lsx_vfnmsub_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vftintrne.w.s"]
-    fn __lsx_vftintrne_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrne_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrne.l.d"]
-    fn __lsx_vftintrne_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrne_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrp.w.s"]
-    fn __lsx_vftintrp_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrp_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrp.l.d"]
-    fn __lsx_vftintrp_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrp_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrm.w.s"]
-    fn __lsx_vftintrm_w_s(a: v4f32) -> v4i32;
+    fn __lsx_vftintrm_w_s(a: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrm.l.d"]
-    fn __lsx_vftintrm_l_d(a: v2f64) -> v2i64;
+    fn __lsx_vftintrm_l_d(a: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftint.w.d"]
-    fn __lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftint_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vffint.s.l"]
-    fn __lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32;
+    fn __lsx_vffint_s_l(a: __v2i64, b: __v2i64) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vftintrz.w.d"]
-    fn __lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrz_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrp.w.d"]
-    fn __lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrp_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrm.w.d"]
-    fn __lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrm_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintrne.w.d"]
-    fn __lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32;
+    fn __lsx_vftintrne_w_d(a: __v2f64, b: __v2f64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vftintl.l.s"]
-    fn __lsx_vftintl_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintl_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftinth.l.s"]
-    fn __lsx_vftinth_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftinth_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vffinth.d.w"]
-    fn __lsx_vffinth_d_w(a: v4i32) -> v2f64;
+    fn __lsx_vffinth_d_w(a: __v4i32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vffintl.d.w"]
-    fn __lsx_vffintl_d_w(a: v4i32) -> v2f64;
+    fn __lsx_vffintl_d_w(a: __v4i32) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vftintrzl.l.s"]
-    fn __lsx_vftintrzl_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrzl_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrzh.l.s"]
-    fn __lsx_vftintrzh_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrzh_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrpl.l.s"]
-    fn __lsx_vftintrpl_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrpl_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrph.l.s"]
-    fn __lsx_vftintrph_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrph_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrml.l.s"]
-    fn __lsx_vftintrml_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrml_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrmh.l.s"]
-    fn __lsx_vftintrmh_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrmh_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrnel.l.s"]
-    fn __lsx_vftintrnel_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrnel_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vftintrneh.l.s"]
-    fn __lsx_vftintrneh_l_s(a: v4f32) -> v2i64;
+    fn __lsx_vftintrneh_l_s(a: __v4f32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfrintrne.s"]
-    fn __lsx_vfrintrne_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrne_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrne.d"]
-    fn __lsx_vfrintrne_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrne_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrintrz.s"]
-    fn __lsx_vfrintrz_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrz_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrz.d"]
-    fn __lsx_vfrintrz_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrz_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrintrp.s"]
-    fn __lsx_vfrintrp_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrp_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrp.d"]
-    fn __lsx_vfrintrp_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrp_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vfrintrm.s"]
-    fn __lsx_vfrintrm_s(a: v4f32) -> v4f32;
+    fn __lsx_vfrintrm_s(a: __v4f32) -> __v4f32;
     #[link_name = "llvm.loongarch.lsx.vfrintrm.d"]
-    fn __lsx_vfrintrm_d(a: v2f64) -> v2f64;
+    fn __lsx_vfrintrm_d(a: __v2f64) -> __v2f64;
     #[link_name = "llvm.loongarch.lsx.vstelm.b"]
-    fn __lsx_vstelm_b(a: v16i8, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_b(a: __v16i8, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vstelm.h"]
-    fn __lsx_vstelm_h(a: v8i16, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_h(a: __v8i16, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vstelm.w"]
-    fn __lsx_vstelm_w(a: v4i32, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_w(a: __v4i32, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vstelm.d"]
-    fn __lsx_vstelm_d(a: v2i64, b: *mut i8, c: i32, d: u32);
+    fn __lsx_vstelm_d(a: __v2i64, b: *mut i8, c: i32, d: u32);
     #[link_name = "llvm.loongarch.lsx.vaddwev.d.w"]
-    fn __lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwev_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.w.h"]
-    fn __lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwev_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwev.h.b"]
-    fn __lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwev_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwod.d.w"]
-    fn __lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwod_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.w.h"]
-    fn __lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwod_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwod.h.b"]
-    fn __lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwod_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwev.d.wu"]
-    fn __lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vaddwev_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.w.hu"]
-    fn __lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vaddwev_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwev.h.bu"]
-    fn __lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vaddwev_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwod.d.wu"]
-    fn __lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vaddwod_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.w.hu"]
-    fn __lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vaddwod_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwod.h.bu"]
-    fn __lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vaddwod_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwev.d.wu.w"]
-    fn __lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwev_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.w.hu.h"]
-    fn __lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwev_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwev.h.bu.b"]
-    fn __lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwev_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwod.d.wu.w"]
-    fn __lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vaddwod_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.w.hu.h"]
-    fn __lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vaddwod_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vaddwod.h.bu.b"]
-    fn __lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vaddwod_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwev.d.w"]
-    fn __lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vsubwev_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.w.h"]
-    fn __lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vsubwev_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwev.h.b"]
-    fn __lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vsubwev_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwod.d.w"]
-    fn __lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vsubwod_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.w.h"]
-    fn __lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vsubwod_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwod.h.b"]
-    fn __lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vsubwod_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwev.d.wu"]
-    fn __lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vsubwev_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.w.hu"]
-    fn __lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vsubwev_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwev.h.bu"]
-    fn __lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vsubwev_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsubwod.d.wu"]
-    fn __lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vsubwod_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.w.hu"]
-    fn __lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vsubwod_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsubwod.h.bu"]
-    fn __lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vsubwod_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vaddwev.q.d"]
-    fn __lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwev_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.q.d"]
-    fn __lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwod_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.q.du"]
-    fn __lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vaddwev_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.q.du"]
-    fn __lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vaddwod_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.q.d"]
-    fn __lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsubwev_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.q.d"]
-    fn __lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsubwod_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwev.q.du"]
-    fn __lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vsubwev_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsubwod.q.du"]
-    fn __lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vsubwod_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwev.q.du.d"]
-    fn __lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwev_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vaddwod.q.du.d"]
-    fn __lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vaddwod_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.d.w"]
-    fn __lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwev_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.w.h"]
-    fn __lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwev_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwev.h.b"]
-    fn __lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwev_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwod.d.w"]
-    fn __lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwod_d_w(a: __v4i32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.w.h"]
-    fn __lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwod_w_h(a: __v8i16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwod.h.b"]
-    fn __lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwod_h_b(a: __v16i8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwev.d.wu"]
-    fn __lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vmulwev_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.w.hu"]
-    fn __lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vmulwev_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwev.h.bu"]
-    fn __lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vmulwev_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwod.d.wu"]
-    fn __lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64;
+    fn __lsx_vmulwod_d_wu(a: __v4u32, b: __v4u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.w.hu"]
-    fn __lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32;
+    fn __lsx_vmulwod_w_hu(a: __v8u16, b: __v8u16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwod.h.bu"]
-    fn __lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16;
+    fn __lsx_vmulwod_h_bu(a: __v16u8, b: __v16u8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwev.d.wu.w"]
-    fn __lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwev_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.w.hu.h"]
-    fn __lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwev_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwev.h.bu.b"]
-    fn __lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwev_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwod.d.wu.w"]
-    fn __lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64;
+    fn __lsx_vmulwod_d_wu_w(a: __v4u32, b: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.w.hu.h"]
-    fn __lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32;
+    fn __lsx_vmulwod_w_hu_h(a: __v8u16, b: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmulwod.h.bu.b"]
-    fn __lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16;
+    fn __lsx_vmulwod_h_bu_b(a: __v16u8, b: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmulwev.q.d"]
-    fn __lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwev_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.q.d"]
-    fn __lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwod_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.q.du"]
-    fn __lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vmulwev_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.q.du"]
-    fn __lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64;
+    fn __lsx_vmulwod_q_du(a: __v2u64, b: __v2u64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwev.q.du.d"]
-    fn __lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwev_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmulwod.q.du.d"]
-    fn __lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64;
+    fn __lsx_vmulwod_q_du_d(a: __v2u64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.q.d"]
-    fn __lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vhaddw_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhaddw.qu.du"]
-    fn __lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vhaddw_qu_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.q.d"]
-    fn __lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vhsubw_q_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vhsubw.qu.du"]
-    fn __lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64;
+    fn __lsx_vhsubw_qu_du(a: __v2u64, b: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.d.w"]
-    fn __lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwev_d_w(a: __v2i64, b: __v4i32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.w.h"]
-    fn __lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwev_w_h(a: __v4i32, b: __v8i16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.h.b"]
-    fn __lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwev_h_b(a: __v8i16, b: __v16i8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.d.wu"]
-    fn __lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64;
+    fn __lsx_vmaddwev_d_wu(a: __v2u64, b: __v4u32, c: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.w.hu"]
-    fn __lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32;
+    fn __lsx_vmaddwev_w_hu(a: __v4u32, b: __v8u16, c: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.h.bu"]
-    fn __lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16;
+    fn __lsx_vmaddwev_h_bu(a: __v8u16, b: __v16u8, c: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.d.w"]
-    fn __lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwod_d_w(a: __v2i64, b: __v4i32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.w.h"]
-    fn __lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwod_w_h(a: __v4i32, b: __v8i16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.h.b"]
-    fn __lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwod_h_b(a: __v8i16, b: __v16i8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.d.wu"]
-    fn __lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64;
+    fn __lsx_vmaddwod_d_wu(a: __v2u64, b: __v4u32, c: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.w.hu"]
-    fn __lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32;
+    fn __lsx_vmaddwod_w_hu(a: __v4u32, b: __v8u16, c: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.h.bu"]
-    fn __lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16;
+    fn __lsx_vmaddwod_h_bu(a: __v8u16, b: __v16u8, c: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.d.wu.w"]
-    fn __lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwev_d_wu_w(a: __v2i64, b: __v4u32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.w.hu.h"]
-    fn __lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwev_w_hu_h(a: __v4i32, b: __v8u16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.h.bu.b"]
-    fn __lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwev_h_bu_b(a: __v8i16, b: __v16u8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.d.wu.w"]
-    fn __lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64;
+    fn __lsx_vmaddwod_d_wu_w(a: __v2i64, b: __v4u32, c: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.w.hu.h"]
-    fn __lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32;
+    fn __lsx_vmaddwod_w_hu_h(a: __v4i32, b: __v8u16, c: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.h.bu.b"]
-    fn __lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16;
+    fn __lsx_vmaddwod_h_bu_b(a: __v8i16, b: __v16u8, c: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.q.d"]
-    fn __lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwev_q_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.q.d"]
-    fn __lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwod_q_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.q.du"]
-    fn __lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64;
+    fn __lsx_vmaddwev_q_du(a: __v2u64, b: __v2u64, c: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.q.du"]
-    fn __lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64;
+    fn __lsx_vmaddwod_q_du(a: __v2u64, b: __v2u64, c: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vmaddwev.q.du.d"]
-    fn __lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwev_q_du_d(a: __v2i64, b: __v2u64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmaddwod.q.du.d"]
-    fn __lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64;
+    fn __lsx_vmaddwod_q_du_d(a: __v2i64, b: __v2u64, c: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vrotr.b"]
-    fn __lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vrotr_b(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vrotr.h"]
-    fn __lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16;
+    fn __lsx_vrotr_h(a: __v8i16, b: __v8i16) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vrotr.w"]
-    fn __lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32;
+    fn __lsx_vrotr_w(a: __v4i32, b: __v4i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vrotr.d"]
-    fn __lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vrotr_d(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vadd.q"]
-    fn __lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vadd_q(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsub.q"]
-    fn __lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64;
+    fn __lsx_vsub_q(a: __v2i64, b: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vldrepl.b"]
-    fn __lsx_vldrepl_b(a: *const i8, b: i32) -> v16i8;
+    fn __lsx_vldrepl_b(a: *const i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vldrepl.h"]
-    fn __lsx_vldrepl_h(a: *const i8, b: i32) -> v8i16;
+    fn __lsx_vldrepl_h(a: *const i8, b: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vldrepl.w"]
-    fn __lsx_vldrepl_w(a: *const i8, b: i32) -> v4i32;
+    fn __lsx_vldrepl_w(a: *const i8, b: i32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vldrepl.d"]
-    fn __lsx_vldrepl_d(a: *const i8, b: i32) -> v2i64;
+    fn __lsx_vldrepl_d(a: *const i8, b: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vmskgez.b"]
-    fn __lsx_vmskgez_b(a: v16i8) -> v16i8;
+    fn __lsx_vmskgez_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vmsknz.b"]
-    fn __lsx_vmsknz_b(a: v16i8) -> v16i8;
+    fn __lsx_vmsknz_b(a: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vexth.h.b"]
-    fn __lsx_vexth_h_b(a: v16i8) -> v8i16;
+    fn __lsx_vexth_h_b(a: __v16i8) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vexth.w.h"]
-    fn __lsx_vexth_w_h(a: v8i16) -> v4i32;
+    fn __lsx_vexth_w_h(a: __v8i16) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vexth.d.w"]
-    fn __lsx_vexth_d_w(a: v4i32) -> v2i64;
+    fn __lsx_vexth_d_w(a: __v4i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vexth.q.d"]
-    fn __lsx_vexth_q_d(a: v2i64) -> v2i64;
+    fn __lsx_vexth_q_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vexth.hu.bu"]
-    fn __lsx_vexth_hu_bu(a: v16u8) -> v8u16;
+    fn __lsx_vexth_hu_bu(a: __v16u8) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vexth.wu.hu"]
-    fn __lsx_vexth_wu_hu(a: v8u16) -> v4u32;
+    fn __lsx_vexth_wu_hu(a: __v8u16) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vexth.du.wu"]
-    fn __lsx_vexth_du_wu(a: v4u32) -> v2u64;
+    fn __lsx_vexth_du_wu(a: __v4u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vexth.qu.du"]
-    fn __lsx_vexth_qu_du(a: v2u64) -> v2u64;
+    fn __lsx_vexth_qu_du(a: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vrotri.b"]
-    fn __lsx_vrotri_b(a: v16i8, b: u32) -> v16i8;
+    fn __lsx_vrotri_b(a: __v16i8, b: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vrotri.h"]
-    fn __lsx_vrotri_h(a: v8i16, b: u32) -> v8i16;
+    fn __lsx_vrotri_h(a: __v8i16, b: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vrotri.w"]
-    fn __lsx_vrotri_w(a: v4i32, b: u32) -> v4i32;
+    fn __lsx_vrotri_w(a: __v4i32, b: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vrotri.d"]
-    fn __lsx_vrotri_d(a: v2i64, b: u32) -> v2i64;
+    fn __lsx_vrotri_d(a: __v2i64, b: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vextl.q.d"]
-    fn __lsx_vextl_q_d(a: v2i64) -> v2i64;
+    fn __lsx_vextl_q_d(a: __v2i64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlni.b.h"]
-    fn __lsx_vsrlni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrlni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlni.h.w"]
-    fn __lsx_vsrlni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrlni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlni.w.d"]
-    fn __lsx_vsrlni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrlni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlni.d.q"]
-    fn __lsx_vsrlni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrlni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.b.h"]
-    fn __lsx_vsrlrni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrlrni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.h.w"]
-    fn __lsx_vsrlrni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrlrni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.w.d"]
-    fn __lsx_vsrlrni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrlrni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrlrni.d.q"]
-    fn __lsx_vsrlrni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrlrni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrlni.b.h"]
-    fn __lsx_vssrlni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrlni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrlni.h.w"]
-    fn __lsx_vssrlni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrlni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrlni.w.d"]
-    fn __lsx_vssrlni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrlni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrlni.d.q"]
-    fn __lsx_vssrlni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrlni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrlni.bu.h"]
-    fn __lsx_vssrlni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrlni_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrlni.hu.w"]
-    fn __lsx_vssrlni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrlni_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrlni.wu.d"]
-    fn __lsx_vssrlni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrlni_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrlni.du.q"]
-    fn __lsx_vssrlni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrlni_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.b.h"]
-    fn __lsx_vssrlrni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrlrni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.h.w"]
-    fn __lsx_vssrlrni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrlrni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.w.d"]
-    fn __lsx_vssrlrni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrlrni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.d.q"]
-    fn __lsx_vssrlrni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrlrni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.bu.h"]
-    fn __lsx_vssrlrni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrlrni_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.hu.w"]
-    fn __lsx_vssrlrni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrlrni_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.wu.d"]
-    fn __lsx_vssrlrni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrlrni_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrlrni.du.q"]
-    fn __lsx_vssrlrni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrlrni_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vsrani.b.h"]
-    fn __lsx_vsrani_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrani_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrani.h.w"]
-    fn __lsx_vsrani_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrani_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrani.w.d"]
-    fn __lsx_vsrani_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrani_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrani.d.q"]
-    fn __lsx_vsrani_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrani_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vsrarni.b.h"]
-    fn __lsx_vsrarni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vsrarni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vsrarni.h.w"]
-    fn __lsx_vsrarni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vsrarni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vsrarni.w.d"]
-    fn __lsx_vsrarni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vsrarni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vsrarni.d.q"]
-    fn __lsx_vsrarni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vsrarni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrani.b.h"]
-    fn __lsx_vssrani_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrani_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrani.h.w"]
-    fn __lsx_vssrani_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrani_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrani.w.d"]
-    fn __lsx_vssrani_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrani_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrani.d.q"]
-    fn __lsx_vssrani_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrani_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrani.bu.h"]
-    fn __lsx_vssrani_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrani_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrani.hu.w"]
-    fn __lsx_vssrani_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrani_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrani.wu.d"]
-    fn __lsx_vssrani_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrani_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrani.du.q"]
-    fn __lsx_vssrani_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrani_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vssrarni.b.h"]
-    fn __lsx_vssrarni_b_h(a: v16i8, b: v16i8, c: u32) -> v16i8;
+    fn __lsx_vssrarni_b_h(a: __v16i8, b: __v16i8, c: u32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrarni.h.w"]
-    fn __lsx_vssrarni_h_w(a: v8i16, b: v8i16, c: u32) -> v8i16;
+    fn __lsx_vssrarni_h_w(a: __v8i16, b: __v8i16, c: u32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrarni.w.d"]
-    fn __lsx_vssrarni_w_d(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vssrarni_w_d(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrarni.d.q"]
-    fn __lsx_vssrarni_d_q(a: v2i64, b: v2i64, c: u32) -> v2i64;
+    fn __lsx_vssrarni_d_q(a: __v2i64, b: __v2i64, c: u32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vssrarni.bu.h"]
-    fn __lsx_vssrarni_bu_h(a: v16u8, b: v16i8, c: u32) -> v16u8;
+    fn __lsx_vssrarni_bu_h(a: __v16u8, b: __v16i8, c: u32) -> __v16u8;
     #[link_name = "llvm.loongarch.lsx.vssrarni.hu.w"]
-    fn __lsx_vssrarni_hu_w(a: v8u16, b: v8i16, c: u32) -> v8u16;
+    fn __lsx_vssrarni_hu_w(a: __v8u16, b: __v8i16, c: u32) -> __v8u16;
     #[link_name = "llvm.loongarch.lsx.vssrarni.wu.d"]
-    fn __lsx_vssrarni_wu_d(a: v4u32, b: v4i32, c: u32) -> v4u32;
+    fn __lsx_vssrarni_wu_d(a: __v4u32, b: __v4i32, c: u32) -> __v4u32;
     #[link_name = "llvm.loongarch.lsx.vssrarni.du.q"]
-    fn __lsx_vssrarni_du_q(a: v2u64, b: v2i64, c: u32) -> v2u64;
+    fn __lsx_vssrarni_du_q(a: __v2u64, b: __v2i64, c: u32) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.vpermi.w"]
-    fn __lsx_vpermi_w(a: v4i32, b: v4i32, c: u32) -> v4i32;
+    fn __lsx_vpermi_w(a: __v4i32, b: __v4i32, c: u32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vld"]
-    fn __lsx_vld(a: *const i8, b: i32) -> v16i8;
+    fn __lsx_vld(a: *const i8, b: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vst"]
-    fn __lsx_vst(a: v16i8, b: *mut i8, c: i32);
+    fn __lsx_vst(a: __v16i8, b: *mut i8, c: i32);
     #[link_name = "llvm.loongarch.lsx.vssrlrn.b.h"]
-    fn __lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssrlrn_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.h.w"]
-    fn __lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssrlrn_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrlrn.w.d"]
-    fn __lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssrlrn_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vssrln.b.h"]
-    fn __lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8;
+    fn __lsx_vssrln_b_h(a: __v8i16, b: __v8i16) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vssrln.h.w"]
-    fn __lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16;
+    fn __lsx_vssrln_h_w(a: __v4i32, b: __v4i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vssrln.w.d"]
-    fn __lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32;
+    fn __lsx_vssrln_w_d(a: __v2i64, b: __v2i64) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vorn.v"]
-    fn __lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8;
+    fn __lsx_vorn_v(a: __v16i8, b: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vldi"]
-    fn __lsx_vldi(a: i32) -> v2i64;
+    fn __lsx_vldi(a: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vshuf.b"]
-    fn __lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8;
+    fn __lsx_vshuf_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vldx"]
-    fn __lsx_vldx(a: *const i8, b: i64) -> v16i8;
+    fn __lsx_vldx(a: *const i8, b: i64) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vstx"]
-    fn __lsx_vstx(a: v16i8, b: *mut i8, c: i64);
+    fn __lsx_vstx(a: __v16i8, b: *mut i8, c: i64);
     #[link_name = "llvm.loongarch.lsx.vextl.qu.du"]
-    fn __lsx_vextl_qu_du(a: v2u64) -> v2u64;
+    fn __lsx_vextl_qu_du(a: __v2u64) -> __v2u64;
     #[link_name = "llvm.loongarch.lsx.bnz.b"]
-    fn __lsx_bnz_b(a: v16u8) -> i32;
+    fn __lsx_bnz_b(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.d"]
-    fn __lsx_bnz_d(a: v2u64) -> i32;
+    fn __lsx_bnz_d(a: __v2u64) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.h"]
-    fn __lsx_bnz_h(a: v8u16) -> i32;
+    fn __lsx_bnz_h(a: __v8u16) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.v"]
-    fn __lsx_bnz_v(a: v16u8) -> i32;
+    fn __lsx_bnz_v(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bnz.w"]
-    fn __lsx_bnz_w(a: v4u32) -> i32;
+    fn __lsx_bnz_w(a: __v4u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.b"]
-    fn __lsx_bz_b(a: v16u8) -> i32;
+    fn __lsx_bz_b(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.d"]
-    fn __lsx_bz_d(a: v2u64) -> i32;
+    fn __lsx_bz_d(a: __v2u64) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.h"]
-    fn __lsx_bz_h(a: v8u16) -> i32;
+    fn __lsx_bz_h(a: __v8u16) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.v"]
-    fn __lsx_bz_v(a: v16u8) -> i32;
+    fn __lsx_bz_v(a: __v16u8) -> i32;
     #[link_name = "llvm.loongarch.lsx.bz.w"]
-    fn __lsx_bz_w(a: v4u32) -> i32;
+    fn __lsx_bz_w(a: __v4u32) -> i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.caf.d"]
-    fn __lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_caf_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.caf.s"]
-    fn __lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_caf_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.ceq.d"]
-    fn __lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_ceq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.ceq.s"]
-    fn __lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_ceq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cle.d"]
-    fn __lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cle_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cle.s"]
-    fn __lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cle_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.clt.d"]
-    fn __lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_clt_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.clt.s"]
-    fn __lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_clt_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cne.d"]
-    fn __lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cne_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cne.s"]
-    fn __lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cne_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cor.d"]
-    fn __lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cor_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cor.s"]
-    fn __lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cor_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cueq.d"]
-    fn __lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cueq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cueq.s"]
-    fn __lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cueq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cule.d"]
-    fn __lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cule_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cule.s"]
-    fn __lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cule_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cult.d"]
-    fn __lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cult_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cult.s"]
-    fn __lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cult_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cun.d"]
-    fn __lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cun_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cune.d"]
-    fn __lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_cune_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cune.s"]
-    fn __lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cune_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.cun.s"]
-    fn __lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_cun_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.saf.d"]
-    fn __lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_saf_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.saf.s"]
-    fn __lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_saf_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.seq.d"]
-    fn __lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_seq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.seq.s"]
-    fn __lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_seq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sle.d"]
-    fn __lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sle_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sle.s"]
-    fn __lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sle_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.slt.d"]
-    fn __lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_slt_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.slt.s"]
-    fn __lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_slt_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sne.d"]
-    fn __lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sne_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sne.s"]
-    fn __lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sne_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sor.d"]
-    fn __lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sor_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sor.s"]
-    fn __lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sor_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sueq.d"]
-    fn __lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sueq_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sueq.s"]
-    fn __lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sueq_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sule.d"]
-    fn __lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sule_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sule.s"]
-    fn __lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sule_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sult.d"]
-    fn __lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sult_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sult.s"]
-    fn __lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sult_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sun.d"]
-    fn __lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sun_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sune.d"]
-    fn __lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64;
+    fn __lsx_vfcmp_sune_d(a: __v2f64, b: __v2f64) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sune.s"]
-    fn __lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sune_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vfcmp.sun.s"]
-    fn __lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32;
+    fn __lsx_vfcmp_sun_s(a: __v4f32, b: __v4f32) -> __v4i32;
     #[link_name = "llvm.loongarch.lsx.vrepli.b"]
-    fn __lsx_vrepli_b(a: i32) -> v16i8;
+    fn __lsx_vrepli_b(a: i32) -> __v16i8;
     #[link_name = "llvm.loongarch.lsx.vrepli.d"]
-    fn __lsx_vrepli_d(a: i32) -> v2i64;
+    fn __lsx_vrepli_d(a: i32) -> __v2i64;
     #[link_name = "llvm.loongarch.lsx.vrepli.h"]
-    fn __lsx_vrepli_h(a: i32) -> v8i16;
+    fn __lsx_vrepli_h(a: i32) -> __v8i16;
     #[link_name = "llvm.loongarch.lsx.vrepli.w"]
-    fn __lsx_vrepli_w(a: i32) -> v4i32;
+    fn __lsx_vrepli_w(a: i32) -> __v4i32;
 }
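
Every public wrapper changed below follows one shape: the lane-typed `__v*` values stay private to the extern block above, and the exported functions take and return the opaque `m128i` vector, converting at the call boundary with `transmute`. A minimal sketch of that shape (not part of the patch), using a hypothetical builtin `__lsx_vexample_b` and assuming `m128i` and `__v16i8` are both 128-bit vector types so the size-preserving transmute is sound:

#[inline]
#[target_feature(enable = "lsx")]
#[unstable(feature = "stdarch_loongarch", issue = "117427")]
pub fn lsx_vexample_b(a: m128i, b: m128i) -> m128i {
    // Reinterpret the opaque public vectors as the builtin's lane-typed view,
    // call the LLVM intrinsic, then reinterpret the result back to `m128i`.
    // Only the lane typing changes; no bits are modified by the transmutes.
    unsafe { transmute(__lsx_vexample_b(transmute(a), transmute(b))) }
}

The immediate-operand wrappers (`lsx_vslli_b`, `lsx_vsrai_h`, and so on) differ only in that the `u32` immediate is passed through unchanged and validated with `static_assert_uimm_bits!` before the call.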
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsll_b(a, b) }
+pub fn lsx_vsll_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsll_h(a, b) }
+pub fn lsx_vsll_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsll_w(a, b) }
+pub fn lsx_vsll_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsll_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsll_d(a, b) }
+pub fn lsx_vsll_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsll_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vslli_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vslli_b(a, IMM3) }
+    unsafe { transmute(__lsx_vslli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vslli_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vslli_h(a, IMM4) }
+    unsafe { transmute(__lsx_vslli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vslli_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslli_w(a, IMM5) }
+    unsafe { transmute(__lsx_vslli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslli_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vslli_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vslli_d(a, IMM6) }
+    unsafe { transmute(__lsx_vslli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsra_b(a, b) }
+pub fn lsx_vsra_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsra_h(a, b) }
+pub fn lsx_vsra_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsra_w(a, b) }
+pub fn lsx_vsra_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsra_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsra_d(a, b) }
+pub fn lsx_vsra_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsra_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrai_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrai_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrai_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrai_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrai_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrai_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrai_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrai_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrai_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrai_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrai_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrai_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrai_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsrar_b(a, b) }
+pub fn lsx_vsrar_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsrar_h(a, b) }
+pub fn lsx_vsrar_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsrar_w(a, b) }
+pub fn lsx_vsrar_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrar_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsrar_d(a, b) }
+pub fn lsx_vsrar_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrar_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrari_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrari_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrari_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrari_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrari_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrari_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrari_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrari_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrari_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrari_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrari_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrari_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrari_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsrl_b(a, b) }
+pub fn lsx_vsrl_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsrl_h(a, b) }
+pub fn lsx_vsrl_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsrl_w(a, b) }
+pub fn lsx_vsrl_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrl_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsrl_d(a, b) }
+pub fn lsx_vsrl_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrli_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrli_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrli_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrli_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrli_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrli_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrli_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrli_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrli_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrli_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrli_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrli_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrli_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsrlr_b(a, b) }
+pub fn lsx_vsrlr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsrlr_h(a, b) }
+pub fn lsx_vsrlr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsrlr_w(a, b) }
+pub fn lsx_vsrlr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlr_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsrlr_d(a, b) }
+pub fn lsx_vsrlr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsrlri_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsrlri_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsrlri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsrlri_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrlri_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsrlri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsrlri_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrlri_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsrlri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlri_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsrlri_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrlri_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsrlri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_b(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitclr_b(a, b) }
+pub fn lsx_vbitclr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_h(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vbitclr_h(a, b) }
+pub fn lsx_vbitclr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_w(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vbitclr_w(a, b) }
+pub fn lsx_vbitclr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclr_d(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vbitclr_d(a, b) }
+pub fn lsx_vbitclr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitclr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vbitclri_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vbitclri_b(a, IMM3) }
+    unsafe { transmute(__lsx_vbitclri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vbitclri_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vbitclri_h(a, IMM4) }
+    unsafe { transmute(__lsx_vbitclri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vbitclri_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbitclri_w(a, IMM5) }
+    unsafe { transmute(__lsx_vbitclri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitclri_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vbitclri_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vbitclri_d(a, IMM6) }
+    unsafe { transmute(__lsx_vbitclri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_b(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitset_b(a, b) }
+pub fn lsx_vbitset_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_h(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vbitset_h(a, b) }
+pub fn lsx_vbitset_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_w(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vbitset_w(a, b) }
+pub fn lsx_vbitset_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitset_d(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vbitset_d(a, b) }
+pub fn lsx_vbitset_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitset_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vbitseti_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vbitseti_b(a, IMM3) }
+    unsafe { transmute(__lsx_vbitseti_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vbitseti_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vbitseti_h(a, IMM4) }
+    unsafe { transmute(__lsx_vbitseti_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vbitseti_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbitseti_w(a, IMM5) }
+    unsafe { transmute(__lsx_vbitseti_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseti_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vbitseti_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vbitseti_d(a, IMM6) }
+    unsafe { transmute(__lsx_vbitseti_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_b(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitrev_b(a, b) }
+pub fn lsx_vbitrev_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_h(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vbitrev_h(a, b) }
+pub fn lsx_vbitrev_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_w(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vbitrev_w(a, b) }
+pub fn lsx_vbitrev_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrev_d(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vbitrev_d(a, b) }
+pub fn lsx_vbitrev_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitrev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_b<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vbitrevi_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vbitrevi_b(a, IMM3) }
+    unsafe { transmute(__lsx_vbitrevi_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_h<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vbitrevi_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vbitrevi_h(a, IMM4) }
+    unsafe { transmute(__lsx_vbitrevi_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_w<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vbitrevi_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbitrevi_w(a, IMM5) }
+    unsafe { transmute(__lsx_vbitrevi_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitrevi_d<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vbitrevi_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vbitrevi_d(a, IMM6) }
+    unsafe { transmute(__lsx_vbitrevi_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vadd_b(a, b) }
+pub fn lsx_vadd_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vadd_h(a, b) }
+pub fn lsx_vadd_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vadd_w(a, b) }
+pub fn lsx_vadd_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vadd_d(a, b) }
+pub fn lsx_vadd_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_bu<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vaddi_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_hu<const IMM5: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vaddi_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_wu<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vaddi_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddi_du<const IMM5: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vaddi_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vaddi_du(a, IMM5) }
+    unsafe { transmute(__lsx_vaddi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsub_b(a, b) }
+pub fn lsx_vsub_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsub_h(a, b) }
+pub fn lsx_vsub_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsub_w(a, b) }
+pub fn lsx_vsub_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsub_d(a, b) }
+pub fn lsx_vsub_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_bu<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsubi_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_hu<const IMM5: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsubi_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_wu<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsubi_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubi_du<const IMM5: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsubi_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsubi_du(a, IMM5) }
+    unsafe { transmute(__lsx_vsubi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmax_b(a, b) }
+pub fn lsx_vmax_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmax_h(a, b) }
+pub fn lsx_vmax_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmax_w(a, b) }
+pub fn lsx_vmax_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmax_d(a, b) }
+pub fn lsx_vmax_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vmaxi_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vmaxi_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vmaxi_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vmaxi_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmaxi_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmaxi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmax_bu(a, b) }
+pub fn lsx_vmax_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmax_hu(a, b) }
+pub fn lsx_vmax_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmax_wu(a, b) }
+pub fn lsx_vmax_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmax_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmax_du(a, b) }
+pub fn lsx_vmax_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmax_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_bu<const IMM5: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vmaxi_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_hu<const IMM5: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vmaxi_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vmaxi_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaxi_du<const IMM5: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vmaxi_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmaxi_du(a, IMM5) }
+    unsafe { transmute(__lsx_vmaxi_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmin_b(a, b) }
+pub fn lsx_vmin_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmin_h(a, b) }
+pub fn lsx_vmin_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmin_w(a, b) }
+pub fn lsx_vmin_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmin_d(a, b) }
+pub fn lsx_vmin_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vmini_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vmini_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vmini_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vmini_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vmini_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vmini_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmin_bu(a, b) }
+pub fn lsx_vmin_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmin_hu(a, b) }
+pub fn lsx_vmin_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmin_wu(a, b) }
+pub fn lsx_vmin_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmin_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmin_du(a, b) }
+pub fn lsx_vmin_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmin_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_bu<const IMM5: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vmini_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_hu<const IMM5: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vmini_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vmini_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmini_du<const IMM5: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vmini_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vmini_du(a, IMM5) }
+    unsafe { transmute(__lsx_vmini_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vseq_b(a, b) }
+pub fn lsx_vseq_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vseq_h(a, b) }
+pub fn lsx_vseq_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vseq_w(a, b) }
+pub fn lsx_vseq_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseq_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vseq_d(a, b) }
+pub fn lsx_vseq_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vseq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vseqi_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vseqi_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vseqi_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vseqi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vseqi_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vseqi_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vseqi_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vslti_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vslt_b(a, b) }
+pub fn lsx_vslt_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vslt_h(a, b) }
+pub fn lsx_vslt_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vslt_w(a, b) }
+pub fn lsx_vslt_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vslt_d(a, b) }
+pub fn lsx_vslt_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vslti_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vslti_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vslti_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslti_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslti_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_bu(a: v16u8, b: v16u8) -> v16i8 {
-    unsafe { __lsx_vslt_bu(a, b) }
+pub fn lsx_vslt_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_hu(a: v8u16, b: v8u16) -> v8i16 {
-    unsafe { __lsx_vslt_hu(a, b) }
+pub fn lsx_vslt_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_wu(a: v4u32, b: v4u32) -> v4i32 {
-    unsafe { __lsx_vslt_wu(a, b) }
+pub fn lsx_vslt_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslt_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vslt_du(a, b) }
+pub fn lsx_vslt_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vslt_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_bu<const IMM5: u32>(a: v16u8) -> v16i8 {
+pub fn lsx_vslti_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_hu<const IMM5: u32>(a: v8u16) -> v8i16 {
+pub fn lsx_vslti_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_wu<const IMM5: u32>(a: v4u32) -> v4i32 {
+pub fn lsx_vslti_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslti_du<const IMM5: u32>(a: v2u64) -> v2i64 {
+pub fn lsx_vslti_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslti_du(a, IMM5) }
+    unsafe { transmute(__lsx_vslti_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsle_b(a, b) }
+pub fn lsx_vsle_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsle_h(a, b) }
+pub fn lsx_vsle_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsle_w(a, b) }
+pub fn lsx_vsle_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsle_d(a, b) }
+pub fn lsx_vsle_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
+pub fn lsx_vslei_b<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_b(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_b(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
+pub fn lsx_vslei_h<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_h(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_h(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
+pub fn lsx_vslei_w<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_w(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_w(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
+pub fn lsx_vslei_d<const IMM_S5: i32>(a: m128i) -> m128i {
     static_assert_simm_bits!(IMM_S5, 5);
-    unsafe { __lsx_vslei_d(a, IMM_S5) }
+    unsafe { transmute(__lsx_vslei_d(transmute(a), IMM_S5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_bu(a: v16u8, b: v16u8) -> v16i8 {
-    unsafe { __lsx_vsle_bu(a, b) }
+pub fn lsx_vsle_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_hu(a: v8u16, b: v8u16) -> v8i16 {
-    unsafe { __lsx_vsle_hu(a, b) }
+pub fn lsx_vsle_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_wu(a: v4u32, b: v4u32) -> v4i32 {
-    unsafe { __lsx_vsle_wu(a, b) }
+pub fn lsx_vsle_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsle_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vsle_du(a, b) }
+pub fn lsx_vsle_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsle_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_bu<const IMM5: u32>(a: v16u8) -> v16i8 {
+pub fn lsx_vslei_bu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_bu(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_bu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_hu<const IMM5: u32>(a: v8u16) -> v8i16 {
+pub fn lsx_vslei_hu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_hu(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_hu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_wu<const IMM5: u32>(a: v4u32) -> v4i32 {
+pub fn lsx_vslei_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vslei_du<const IMM5: u32>(a: v2u64) -> v2i64 {
+pub fn lsx_vslei_du<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vslei_du(a, IMM5) }
+    unsafe { transmute(__lsx_vslei_du(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vsat_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsat_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsat_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vsat_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsat_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsat_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vsat_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsat_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsat_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vsat_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsat_d(a, IMM6) }
+    unsafe { transmute(__lsx_vsat_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_bu<const IMM3: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vsat_bu<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsat_bu(a, IMM3) }
+    unsafe { transmute(__lsx_vsat_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_hu<const IMM4: u32>(a: v8u16) -> v8u16 {
+pub fn lsx_vsat_hu<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsat_hu(a, IMM4) }
+    unsafe { transmute(__lsx_vsat_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_wu<const IMM5: u32>(a: v4u32) -> v4u32 {
+pub fn lsx_vsat_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsat_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vsat_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsat_du<const IMM6: u32>(a: v2u64) -> v2u64 {
+pub fn lsx_vsat_du<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsat_du(a, IMM6) }
+    unsafe { transmute(__lsx_vsat_du(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vadda_b(a, b) }
+pub fn lsx_vadda_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vadda_h(a, b) }
+pub fn lsx_vadda_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vadda_w(a, b) }
+pub fn lsx_vadda_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadda_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vadda_d(a, b) }
+pub fn lsx_vadda_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadda_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsadd_b(a, b) }
+pub fn lsx_vsadd_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsadd_h(a, b) }
+pub fn lsx_vsadd_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsadd_w(a, b) }
+pub fn lsx_vsadd_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsadd_d(a, b) }
+pub fn lsx_vsadd_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vsadd_bu(a, b) }
+pub fn lsx_vsadd_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vsadd_hu(a, b) }
+pub fn lsx_vsadd_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vsadd_wu(a, b) }
+pub fn lsx_vsadd_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsadd_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vsadd_du(a, b) }
+pub fn lsx_vsadd_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsadd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vavg_b(a, b) }
+pub fn lsx_vavg_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vavg_h(a, b) }
+pub fn lsx_vavg_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vavg_w(a, b) }
+pub fn lsx_vavg_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vavg_d(a, b) }
+pub fn lsx_vavg_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vavg_bu(a, b) }
+pub fn lsx_vavg_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vavg_hu(a, b) }
+pub fn lsx_vavg_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vavg_wu(a, b) }
+pub fn lsx_vavg_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavg_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vavg_du(a, b) }
+pub fn lsx_vavg_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavg_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vavgr_b(a, b) }
+pub fn lsx_vavgr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vavgr_h(a, b) }
+pub fn lsx_vavgr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vavgr_w(a, b) }
+pub fn lsx_vavgr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vavgr_d(a, b) }
+pub fn lsx_vavgr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vavgr_bu(a, b) }
+pub fn lsx_vavgr_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vavgr_hu(a, b) }
+pub fn lsx_vavgr_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vavgr_wu(a, b) }
+pub fn lsx_vavgr_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vavgr_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vavgr_du(a, b) }
+pub fn lsx_vavgr_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vavgr_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vssub_b(a, b) }
+pub fn lsx_vssub_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vssub_h(a, b) }
+pub fn lsx_vssub_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vssub_w(a, b) }
+pub fn lsx_vssub_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vssub_d(a, b) }
+pub fn lsx_vssub_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vssub_bu(a, b) }
+pub fn lsx_vssub_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vssub_hu(a, b) }
+pub fn lsx_vssub_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vssub_wu(a, b) }
+pub fn lsx_vssub_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssub_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vssub_du(a, b) }
+pub fn lsx_vssub_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssub_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vabsd_b(a, b) }
+pub fn lsx_vabsd_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vabsd_h(a, b) }
+pub fn lsx_vabsd_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vabsd_w(a, b) }
+pub fn lsx_vabsd_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vabsd_d(a, b) }
+pub fn lsx_vabsd_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vabsd_bu(a, b) }
+pub fn lsx_vabsd_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vabsd_hu(a, b) }
+pub fn lsx_vabsd_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vabsd_wu(a, b) }
+pub fn lsx_vabsd_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vabsd_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vabsd_du(a, b) }
+pub fn lsx_vabsd_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vabsd_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmul_b(a, b) }
+pub fn lsx_vmul_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmul_h(a, b) }
+pub fn lsx_vmul_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmul_w(a, b) }
+pub fn lsx_vmul_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmul_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmul_d(a, b) }
+pub fn lsx_vmul_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vmadd_b(a, b, c) }
+pub fn lsx_vmadd_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vmadd_h(a, b, c) }
+pub fn lsx_vmadd_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
-    unsafe { __lsx_vmadd_w(a, b, c) }
+pub fn lsx_vmadd_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmadd_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmadd_d(a, b, c) }
+pub fn lsx_vmadd_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vmsub_b(a, b, c) }
+pub fn lsx_vmsub_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vmsub_h(a, b, c) }
+pub fn lsx_vmsub_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
-    unsafe { __lsx_vmsub_w(a, b, c) }
+pub fn lsx_vmsub_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsub_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmsub_d(a, b, c) }
+pub fn lsx_vmsub_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vdiv_b(a, b) }
+pub fn lsx_vdiv_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vdiv_h(a, b) }
+pub fn lsx_vdiv_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vdiv_w(a, b) }
+pub fn lsx_vdiv_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vdiv_d(a, b) }
+pub fn lsx_vdiv_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vdiv_bu(a, b) }
+pub fn lsx_vdiv_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vdiv_hu(a, b) }
+pub fn lsx_vdiv_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vdiv_wu(a, b) }
+pub fn lsx_vdiv_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vdiv_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vdiv_du(a, b) }
+pub fn lsx_vdiv_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vdiv_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vhaddw_h_b(a, b) }
+pub fn lsx_vhaddw_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vhaddw_w_h(a, b) }
+pub fn lsx_vhaddw_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vhaddw_d_w(a, b) }
+pub fn lsx_vhaddw_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_hu_bu(a: v16u8, b: v16u8) -> v8u16 {
-    unsafe { __lsx_vhaddw_hu_bu(a, b) }
+pub fn lsx_vhaddw_hu_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_wu_hu(a: v8u16, b: v8u16) -> v4u32 {
-    unsafe { __lsx_vhaddw_wu_hu(a, b) }
+pub fn lsx_vhaddw_wu_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_du_wu(a: v4u32, b: v4u32) -> v2u64 {
-    unsafe { __lsx_vhaddw_du_wu(a, b) }
+pub fn lsx_vhaddw_du_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vhsubw_h_b(a, b) }
+pub fn lsx_vhsubw_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vhsubw_w_h(a, b) }
+pub fn lsx_vhsubw_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vhsubw_d_w(a, b) }
+pub fn lsx_vhsubw_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_hu_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vhsubw_hu_bu(a, b) }
+pub fn lsx_vhsubw_hu_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_hu_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_wu_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vhsubw_wu_hu(a, b) }
+pub fn lsx_vhsubw_wu_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_wu_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_du_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vhsubw_du_wu(a, b) }
+pub fn lsx_vhsubw_du_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_du_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmod_b(a, b) }
+pub fn lsx_vmod_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmod_h(a, b) }
+pub fn lsx_vmod_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmod_w(a, b) }
+pub fn lsx_vmod_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmod_d(a, b) }
+pub fn lsx_vmod_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmod_bu(a, b) }
+pub fn lsx_vmod_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmod_hu(a, b) }
+pub fn lsx_vmod_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmod_wu(a, b) }
+pub fn lsx_vmod_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmod_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmod_du(a, b) }
+pub fn lsx_vmod_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmod_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_b(a: v16i8, b: i32) -> v16i8 {
-    unsafe { __lsx_vreplve_b(a, b) }
+pub fn lsx_vreplve_b(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_h(a: v8i16, b: i32) -> v8i16 {
-    unsafe { __lsx_vreplve_h(a, b) }
+pub fn lsx_vreplve_h(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_w(a: v4i32, b: i32) -> v4i32 {
-    unsafe { __lsx_vreplve_w(a, b) }
+pub fn lsx_vreplve_w(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplve_d(a: v2i64, b: i32) -> v2i64 {
-    unsafe { __lsx_vreplve_d(a, b) }
+pub fn lsx_vreplve_d(a: m128i, b: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplve_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_b<const IMM4: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vreplvei_b<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vreplvei_b(a, IMM4) }
+    unsafe { transmute(__lsx_vreplvei_b(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_h<const IMM3: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vreplvei_h<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vreplvei_h(a, IMM3) }
+    unsafe { transmute(__lsx_vreplvei_h(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_w<const IMM2: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vreplvei_w<const IMM2: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vreplvei_w(a, IMM2) }
+    unsafe { transmute(__lsx_vreplvei_w(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplvei_d<const IMM1: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vreplvei_d<const IMM1: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vreplvei_d(a, IMM1) }
+    unsafe { transmute(__lsx_vreplvei_d(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpickev_b(a, b) }
+pub fn lsx_vpickev_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpickev_h(a, b) }
+pub fn lsx_vpickev_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpickev_w(a, b) }
+pub fn lsx_vpickev_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickev_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpickev_d(a, b) }
+pub fn lsx_vpickev_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpickod_b(a, b) }
+pub fn lsx_vpickod_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpickod_h(a, b) }
+pub fn lsx_vpickod_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpickod_w(a, b) }
+pub fn lsx_vpickod_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickod_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpickod_d(a, b) }
+pub fn lsx_vpickod_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpickod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vilvh_b(a, b) }
+pub fn lsx_vilvh_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vilvh_h(a, b) }
+pub fn lsx_vilvh_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vilvh_w(a, b) }
+pub fn lsx_vilvh_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvh_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vilvh_d(a, b) }
+pub fn lsx_vilvh_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vilvl_b(a, b) }
+pub fn lsx_vilvl_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vilvl_h(a, b) }
+pub fn lsx_vilvl_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vilvl_w(a, b) }
+pub fn lsx_vilvl_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vilvl_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vilvl_d(a, b) }
+pub fn lsx_vilvl_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vilvl_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpackev_b(a, b) }
+pub fn lsx_vpackev_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpackev_h(a, b) }
+pub fn lsx_vpackev_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpackev_w(a, b) }
+pub fn lsx_vpackev_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackev_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpackev_d(a, b) }
+pub fn lsx_vpackev_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackev_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vpackod_b(a, b) }
+pub fn lsx_vpackod_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vpackod_h(a, b) }
+pub fn lsx_vpackod_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vpackod_w(a, b) }
+pub fn lsx_vpackod_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpackod_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vpackod_d(a, b) }
+pub fn lsx_vpackod_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpackod_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vshuf_h(a, b, c) }
+pub fn lsx_vshuf_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 {
-    unsafe { __lsx_vshuf_w(a, b, c) }
+pub fn lsx_vshuf_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vshuf_d(a, b, c) }
+pub fn lsx_vshuf_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vand_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vand_v(a, b) }
+pub fn lsx_vand_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vand_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vandi_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vandi_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vandi_b(a, IMM8) }
+    unsafe { transmute(__lsx_vandi_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vor_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vor_v(a, b) }
+pub fn lsx_vor_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vori_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vori_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vori_b(a, IMM8) }
+    unsafe { transmute(__lsx_vori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vnor_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vnor_v(a, b) }
+pub fn lsx_vnor_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vnor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vnori_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vnori_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vnori_b(a, IMM8) }
+    unsafe { transmute(__lsx_vnori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vxor_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vxor_v(a, b) }
+pub fn lsx_vxor_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vxor_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vxori_b<const IMM8: u32>(a: v16u8) -> v16u8 {
+pub fn lsx_vxori_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vxori_b(a, IMM8) }
+    unsafe { transmute(__lsx_vxori_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
-    unsafe { __lsx_vbitsel_v(a, b, c) }
+pub fn lsx_vbitsel_v(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbitseli_b<const IMM8: u32>(a: v16u8, b: v16u8) -> v16u8 {
+pub fn lsx_vbitseli_b<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vbitseli_b(a, b, IMM8) }
+    unsafe { transmute(__lsx_vbitseli_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_b<const IMM8: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vshuf4i_b<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_b(a, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_b(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_h<const IMM8: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vshuf4i_h<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_h(a, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_h(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_w<const IMM8: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vshuf4i_w<const IMM8: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_w(a, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_w(transmute(a), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_b(a: i32) -> v16i8 {
-    unsafe { __lsx_vreplgr2vr_b(a) }
+pub fn lsx_vreplgr2vr_b(a: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_h(a: i32) -> v8i16 {
-    unsafe { __lsx_vreplgr2vr_h(a) }
+pub fn lsx_vreplgr2vr_h(a: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_w(a: i32) -> v4i32 {
-    unsafe { __lsx_vreplgr2vr_w(a) }
+pub fn lsx_vreplgr2vr_w(a: i32) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vreplgr2vr_d(a: i64) -> v2i64 {
-    unsafe { __lsx_vreplgr2vr_d(a) }
+pub fn lsx_vreplgr2vr_d(a: i64) -> m128i {
+    unsafe { transmute(__lsx_vreplgr2vr_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vpcnt_b(a) }
+pub fn lsx_vpcnt_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vpcnt_h(a) }
+pub fn lsx_vpcnt_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vpcnt_w(a) }
+pub fn lsx_vpcnt_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpcnt_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vpcnt_d(a) }
+pub fn lsx_vpcnt_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vpcnt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vclo_b(a) }
+pub fn lsx_vclo_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vclo_h(a) }
+pub fn lsx_vclo_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vclo_w(a) }
+pub fn lsx_vclo_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclo_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vclo_d(a) }
+pub fn lsx_vclo_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclo_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vclz_b(a) }
+pub fn lsx_vclz_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vclz_h(a) }
+pub fn lsx_vclz_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vclz_w(a) }
+pub fn lsx_vclz_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vclz_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vclz_d(a) }
+pub fn lsx_vclz_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vclz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_b<const IMM4: u32>(a: v16i8) -> i32 {
+pub fn lsx_vpickve2gr_b<const IMM4: u32>(a: m128i) -> i32 {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vpickve2gr_b(a, IMM4) }
+    unsafe { transmute(__lsx_vpickve2gr_b(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_h<const IMM3: u32>(a: v8i16) -> i32 {
+pub fn lsx_vpickve2gr_h<const IMM3: u32>(a: m128i) -> i32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vpickve2gr_h(a, IMM3) }
+    unsafe { transmute(__lsx_vpickve2gr_h(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_w<const IMM2: u32>(a: v4i32) -> i32 {
+pub fn lsx_vpickve2gr_w<const IMM2: u32>(a: m128i) -> i32 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vpickve2gr_w(a, IMM2) }
+    unsafe { transmute(__lsx_vpickve2gr_w(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_d<const IMM1: u32>(a: v2i64) -> i64 {
+pub fn lsx_vpickve2gr_d<const IMM1: u32>(a: m128i) -> i64 {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vpickve2gr_d(a, IMM1) }
+    unsafe { transmute(__lsx_vpickve2gr_d(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_bu<const IMM4: u32>(a: v16i8) -> u32 {
+pub fn lsx_vpickve2gr_bu<const IMM4: u32>(a: m128i) -> u32 {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vpickve2gr_bu(a, IMM4) }
+    unsafe { transmute(__lsx_vpickve2gr_bu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_hu<const IMM3: u32>(a: v8i16) -> u32 {
+pub fn lsx_vpickve2gr_hu<const IMM3: u32>(a: m128i) -> u32 {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vpickve2gr_hu(a, IMM3) }
+    unsafe { transmute(__lsx_vpickve2gr_hu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_wu<const IMM2: u32>(a: v4i32) -> u32 {
+pub fn lsx_vpickve2gr_wu<const IMM2: u32>(a: m128i) -> u32 {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vpickve2gr_wu(a, IMM2) }
+    unsafe { transmute(__lsx_vpickve2gr_wu(transmute(a), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpickve2gr_du<const IMM1: u32>(a: v2i64) -> u64 {
+pub fn lsx_vpickve2gr_du<const IMM1: u32>(a: m128i) -> u64 {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vpickve2gr_du(a, IMM1) }
+    unsafe { transmute(__lsx_vpickve2gr_du(transmute(a), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_b<const IMM4: u32>(a: v16i8, b: i32) -> v16i8 {
+pub fn lsx_vinsgr2vr_b<const IMM4: u32>(a: m128i, b: i32) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vinsgr2vr_b(a, b, IMM4) }
+    unsafe { transmute(__lsx_vinsgr2vr_b(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_h<const IMM3: u32>(a: v8i16, b: i32) -> v8i16 {
+pub fn lsx_vinsgr2vr_h<const IMM3: u32>(a: m128i, b: i32) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vinsgr2vr_h(a, b, IMM3) }
+    unsafe { transmute(__lsx_vinsgr2vr_h(transmute(a), transmute(b), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_w<const IMM2: u32>(a: v4i32, b: i32) -> v4i32 {
+pub fn lsx_vinsgr2vr_w<const IMM2: u32>(a: m128i, b: i32) -> m128i {
     static_assert_uimm_bits!(IMM2, 2);
-    unsafe { __lsx_vinsgr2vr_w(a, b, IMM2) }
+    unsafe { transmute(__lsx_vinsgr2vr_w(transmute(a), transmute(b), IMM2)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vinsgr2vr_d<const IMM1: u32>(a: v2i64, b: i64) -> v2i64 {
+pub fn lsx_vinsgr2vr_d<const IMM1: u32>(a: m128i, b: i64) -> m128i {
     static_assert_uimm_bits!(IMM1, 1);
-    unsafe { __lsx_vinsgr2vr_d(a, b, IMM1) }
+    unsafe { transmute(__lsx_vinsgr2vr_d(transmute(a), transmute(b), IMM1)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfadd_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfadd_s(a, b) }
+pub fn lsx_vfadd_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfadd_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfadd_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfadd_d(a, b) }
+pub fn lsx_vfadd_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfadd_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsub_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfsub_s(a, b) }
+pub fn lsx_vfsub_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfsub_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsub_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfsub_d(a, b) }
+pub fn lsx_vfsub_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfsub_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmul_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmul_s(a, b) }
+pub fn lsx_vfmul_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmul_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmul_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmul_d(a, b) }
+pub fn lsx_vfmul_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmul_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfdiv_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfdiv_s(a, b) }
+pub fn lsx_vfdiv_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfdiv_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfdiv_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfdiv_d(a, b) }
+pub fn lsx_vfdiv_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfdiv_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvt_h_s(a: v4f32, b: v4f32) -> v8i16 {
-    unsafe { __lsx_vfcvt_h_s(a, b) }
+pub fn lsx_vfcvt_h_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcvt_h_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvt_s_d(a: v2f64, b: v2f64) -> v4f32 {
-    unsafe { __lsx_vfcvt_s_d(a, b) }
+pub fn lsx_vfcvt_s_d(a: m128d, b: m128d) -> m128 {
+    unsafe { transmute(__lsx_vfcvt_s_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmin_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmin_s(a, b) }
+pub fn lsx_vfmin_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmin_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmin_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmin_d(a, b) }
+pub fn lsx_vfmin_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmin_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmina_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmina_s(a, b) }
+pub fn lsx_vfmina_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmina_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmina_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmina_d(a, b) }
+pub fn lsx_vfmina_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmina_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmax_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmax_s(a, b) }
+pub fn lsx_vfmax_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmax_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmax_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmax_d(a, b) }
+pub fn lsx_vfmax_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmax_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmaxa_s(a: v4f32, b: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmaxa_s(a, b) }
+pub fn lsx_vfmaxa_s(a: m128, b: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmaxa_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmaxa_d(a: v2f64, b: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmaxa_d(a, b) }
+pub fn lsx_vfmaxa_d(a: m128d, b: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmaxa_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfclass_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vfclass_s(a) }
+pub fn lsx_vfclass_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vfclass_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfclass_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vfclass_d(a) }
+pub fn lsx_vfclass_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfclass_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsqrt_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfsqrt_s(a) }
+pub fn lsx_vfsqrt_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfsqrt_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfsqrt_d(a) }
+pub fn lsx_vfsqrt_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecip_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrecip_s(a) }
+pub fn lsx_vfrecip_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrecip_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecip_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrecip_d(a) }
+pub fn lsx_vfrecip_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrecip_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecipe_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrecipe_s(a) }
+pub fn lsx_vfrecipe_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrecipe_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrecipe_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrecipe_d(a) }
+pub fn lsx_vfrecipe_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrecipe_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrte_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrsqrte_s(a) }
+pub fn lsx_vfrsqrte_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrsqrte_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx,frecipe")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrte_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrsqrte_d(a) }
+pub fn lsx_vfrsqrte_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrsqrte_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrint_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrint_s(a) }
+pub fn lsx_vfrint_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrint_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrint_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrint_d(a) }
+pub fn lsx_vfrint_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrint_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrt_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrsqrt_s(a) }
+pub fn lsx_vfrsqrt_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrsqrt_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrsqrt_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrsqrt_d(a) }
+pub fn lsx_vfrsqrt_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrsqrt_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vflogb_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vflogb_s(a) }
+pub fn lsx_vflogb_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vflogb_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vflogb_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vflogb_d(a) }
+pub fn lsx_vflogb_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vflogb_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvth_s_h(a: v8i16) -> v4f32 {
-    unsafe { __lsx_vfcvth_s_h(a) }
+pub fn lsx_vfcvth_s_h(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vfcvth_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvth_d_s(a: v4f32) -> v2f64 {
-    unsafe { __lsx_vfcvth_d_s(a) }
+pub fn lsx_vfcvth_d_s(a: m128) -> m128d {
+    unsafe { transmute(__lsx_vfcvth_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvtl_s_h(a: v8i16) -> v4f32 {
-    unsafe { __lsx_vfcvtl_s_h(a) }
+pub fn lsx_vfcvtl_s_h(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vfcvtl_s_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcvtl_d_s(a: v4f32) -> v2f64 {
-    unsafe { __lsx_vfcvtl_d_s(a) }
+pub fn lsx_vfcvtl_d_s(a: m128) -> m128d {
+    unsafe { transmute(__lsx_vfcvtl_d_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftint_w_s(a) }
+pub fn lsx_vftint_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftint_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftint_l_d(a) }
+pub fn lsx_vftint_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftint_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_wu_s(a: v4f32) -> v4u32 {
-    unsafe { __lsx_vftint_wu_s(a) }
+pub fn lsx_vftint_wu_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftint_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_lu_d(a: v2f64) -> v2u64 {
-    unsafe { __lsx_vftint_lu_d(a) }
+pub fn lsx_vftint_lu_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftint_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrz_w_s(a) }
+pub fn lsx_vftintrz_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrz_l_d(a) }
+pub fn lsx_vftintrz_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_wu_s(a: v4f32) -> v4u32 {
-    unsafe { __lsx_vftintrz_wu_s(a) }
+pub fn lsx_vftintrz_wu_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_wu_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_lu_d(a: v2f64) -> v2u64 {
-    unsafe { __lsx_vftintrz_lu_d(a) }
+pub fn lsx_vftintrz_lu_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_lu_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_s_w(a: v4i32) -> v4f32 {
-    unsafe { __lsx_vffint_s_w(a) }
+pub fn lsx_vffint_s_w(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vffint_s_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_d_l(a: v2i64) -> v2f64 {
-    unsafe { __lsx_vffint_d_l(a) }
+pub fn lsx_vffint_d_l(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffint_d_l(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_s_wu(a: v4u32) -> v4f32 {
-    unsafe { __lsx_vffint_s_wu(a) }
+pub fn lsx_vffint_s_wu(a: m128i) -> m128 {
+    unsafe { transmute(__lsx_vffint_s_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_d_lu(a: v2u64) -> v2f64 {
-    unsafe { __lsx_vffint_d_lu(a) }
+pub fn lsx_vffint_d_lu(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffint_d_lu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vandn_v(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vandn_v(a, b) }
+pub fn lsx_vandn_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vandn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vneg_b(a) }
+pub fn lsx_vneg_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vneg_h(a) }
+pub fn lsx_vneg_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vneg_w(a) }
+pub fn lsx_vneg_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vneg_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vneg_d(a) }
+pub fn lsx_vneg_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vneg_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vmuh_b(a, b) }
+pub fn lsx_vmuh_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vmuh_h(a, b) }
+pub fn lsx_vmuh_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vmuh_w(a, b) }
+pub fn lsx_vmuh_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmuh_d(a, b) }
+pub fn lsx_vmuh_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_bu(a: v16u8, b: v16u8) -> v16u8 {
-    unsafe { __lsx_vmuh_bu(a, b) }
+pub fn lsx_vmuh_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_hu(a: v8u16, b: v8u16) -> v8u16 {
-    unsafe { __lsx_vmuh_hu(a, b) }
+pub fn lsx_vmuh_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_wu(a: v4u32, b: v4u32) -> v4u32 {
-    unsafe { __lsx_vmuh_wu(a, b) }
+pub fn lsx_vmuh_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmuh_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vmuh_du(a, b) }
+pub fn lsx_vmuh_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmuh_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_h_b<const IMM3: u32>(a: v16i8) -> v8i16 {
+pub fn lsx_vsllwil_h_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsllwil_h_b(a, IMM3) }
+    unsafe { transmute(__lsx_vsllwil_h_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_w_h<const IMM4: u32>(a: v8i16) -> v4i32 {
+pub fn lsx_vsllwil_w_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsllwil_w_h(a, IMM4) }
+    unsafe { transmute(__lsx_vsllwil_w_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_d_w<const IMM5: u32>(a: v4i32) -> v2i64 {
+pub fn lsx_vsllwil_d_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsllwil_d_w(a, IMM5) }
+    unsafe { transmute(__lsx_vsllwil_d_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_hu_bu<const IMM3: u32>(a: v16u8) -> v8u16 {
+pub fn lsx_vsllwil_hu_bu<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vsllwil_hu_bu(a, IMM3) }
+    unsafe { transmute(__lsx_vsllwil_hu_bu(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_wu_hu<const IMM4: u32>(a: v8u16) -> v4u32 {
+pub fn lsx_vsllwil_wu_hu<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsllwil_wu_hu(a, IMM4) }
+    unsafe { transmute(__lsx_vsllwil_wu_hu(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsllwil_du_wu<const IMM5: u32>(a: v4u32) -> v2u64 {
+pub fn lsx_vsllwil_du_wu<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsllwil_du_wu(a, IMM5) }
+    unsafe { transmute(__lsx_vsllwil_du_wu(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsran_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsran_b_h(a, b) }
+pub fn lsx_vsran_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsran_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsran_h_w(a, b) }
+pub fn lsx_vsran_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsran_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsran_w_d(a, b) }
+pub fn lsx_vsran_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssran_b_h(a, b) }
+pub fn lsx_vssran_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssran_h_w(a, b) }
+pub fn lsx_vssran_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssran_w_d(a, b) }
+pub fn lsx_vssran_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssran_bu_h(a, b) }
+pub fn lsx_vssran_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssran_hu_w(a, b) }
+pub fn lsx_vssran_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssran_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssran_wu_d(a, b) }
+pub fn lsx_vssran_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssran_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsrarn_b_h(a, b) }
+pub fn lsx_vsrarn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsrarn_h_w(a, b) }
+pub fn lsx_vsrarn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsrarn_w_d(a, b) }
+pub fn lsx_vsrarn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssrarn_b_h(a, b) }
+pub fn lsx_vssrarn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssrarn_h_w(a, b) }
+pub fn lsx_vssrarn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssrarn_w_d(a, b) }
+pub fn lsx_vssrarn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssrarn_bu_h(a, b) }
+pub fn lsx_vssrarn_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssrarn_hu_w(a, b) }
+pub fn lsx_vssrarn_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarn_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssrarn_wu_d(a, b) }
+pub fn lsx_vssrarn_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrarn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrln_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsrln_b_h(a, b) }
+pub fn lsx_vsrln_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrln_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsrln_h_w(a, b) }
+pub fn lsx_vsrln_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrln_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsrln_w_d(a, b) }
+pub fn lsx_vsrln_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssrln_bu_h(a, b) }
+pub fn lsx_vssrln_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssrln_hu_w(a, b) }
+pub fn lsx_vssrln_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssrln_wu_d(a, b) }
+pub fn lsx_vssrln_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vsrlrn_b_h(a, b) }
+pub fn lsx_vsrlrn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vsrlrn_h_w(a, b) }
+pub fn lsx_vsrlrn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vsrlrn_w_d(a, b) }
+pub fn lsx_vsrlrn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_bu_h(a: v8u16, b: v8u16) -> v16u8 {
-    unsafe { __lsx_vssrlrn_bu_h(a, b) }
+pub fn lsx_vssrlrn_bu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_bu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_hu_w(a: v4u32, b: v4u32) -> v8u16 {
-    unsafe { __lsx_vssrlrn_hu_w(a, b) }
+pub fn lsx_vssrlrn_hu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_hu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_wu_d(a: v2u64, b: v2u64) -> v4u32 {
-    unsafe { __lsx_vssrlrn_wu_d(a, b) }
+pub fn lsx_vssrlrn_wu_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_wu_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstpi_b<const IMM5: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vfrstpi_b<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vfrstpi_b(a, b, IMM5) }
+    unsafe { transmute(__lsx_vfrstpi_b(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstpi_h<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vfrstpi_h<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vfrstpi_h(a, b, IMM5) }
+    unsafe { transmute(__lsx_vfrstpi_h(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstp_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vfrstp_b(a, b, c) }
+pub fn lsx_vfrstp_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vfrstp_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrstp_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 {
-    unsafe { __lsx_vfrstp_h(a, b, c) }
+pub fn lsx_vfrstp_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vfrstp_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf4i_d<const IMM8: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vshuf4i_d<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vshuf4i_d(a, b, IMM8) }
+    unsafe { transmute(__lsx_vshuf4i_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbsrl_v<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vbsrl_v<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbsrl_v(a, IMM5) }
+    unsafe { transmute(__lsx_vbsrl_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vbsll_v<const IMM5: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vbsll_v<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vbsll_v(a, IMM5) }
+    unsafe { transmute(__lsx_vbsll_v(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_b<const IMM8: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vextrins_b<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_b(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_b(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_h<const IMM8: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vextrins_h<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_h(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_h(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_w<const IMM8: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vextrins_w<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_w(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextrins_d<const IMM8: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vextrins_d<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vextrins_d(a, b, IMM8) }
+    unsafe { transmute(__lsx_vextrins_d(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vmskltz_b(a) }
+pub fn lsx_vmskltz_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_h(a: v8i16) -> v8i16 {
-    unsafe { __lsx_vmskltz_h(a) }
+pub fn lsx_vmskltz_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_w(a: v4i32) -> v4i32 {
-    unsafe { __lsx_vmskltz_w(a) }
+pub fn lsx_vmskltz_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskltz_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vmskltz_d(a) }
+pub fn lsx_vmskltz_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskltz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vsigncov_b(a, b) }
+pub fn lsx_vsigncov_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vsigncov_h(a, b) }
+pub fn lsx_vsigncov_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vsigncov_w(a, b) }
+pub fn lsx_vsigncov_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsigncov_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsigncov_d(a, b) }
+pub fn lsx_vsigncov_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsigncov_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmadd_s(a, b, c) }
+pub fn lsx_vfmadd_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmadd_d(a, b, c) }
+pub fn lsx_vfmadd_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfmsub_s(a, b, c) }
+pub fn lsx_vfmsub_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfmsub_d(a, b, c) }
+pub fn lsx_vfmsub_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmadd_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfnmadd_s(a, b, c) }
+pub fn lsx_vfnmadd_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfnmadd_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfnmadd_d(a, b, c) }
+pub fn lsx_vfnmadd_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfnmadd_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmsub_s(a: v4f32, b: v4f32, c: v4f32) -> v4f32 {
-    unsafe { __lsx_vfnmsub_s(a, b, c) }
+pub fn lsx_vfnmsub_s(a: m128, b: m128, c: m128) -> m128 {
+    unsafe { transmute(__lsx_vfnmsub_s(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfnmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 {
-    unsafe { __lsx_vfnmsub_d(a, b, c) }
+pub fn lsx_vfnmsub_d(a: m128d, b: m128d, c: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfnmsub_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrne_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrne_w_s(a) }
+pub fn lsx_vftintrne_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrne_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrne_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrne_l_d(a) }
+pub fn lsx_vftintrne_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrne_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrp_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrp_w_s(a) }
+pub fn lsx_vftintrp_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrp_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrp_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrp_l_d(a) }
+pub fn lsx_vftintrp_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrp_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrm_w_s(a: v4f32) -> v4i32 {
-    unsafe { __lsx_vftintrm_w_s(a) }
+pub fn lsx_vftintrm_w_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrm_w_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrm_l_d(a: v2f64) -> v2i64 {
-    unsafe { __lsx_vftintrm_l_d(a) }
+pub fn lsx_vftintrm_l_d(a: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrm_l_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftint_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftint_w_d(a, b) }
+pub fn lsx_vftint_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftint_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffint_s_l(a: v2i64, b: v2i64) -> v4f32 {
-    unsafe { __lsx_vffint_s_l(a, b) }
+pub fn lsx_vffint_s_l(a: m128i, b: m128i) -> m128 {
+    unsafe { transmute(__lsx_vffint_s_l(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrz_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrz_w_d(a, b) }
+pub fn lsx_vftintrz_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrz_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrp_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrp_w_d(a, b) }
+pub fn lsx_vftintrp_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrp_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrm_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrm_w_d(a, b) }
+pub fn lsx_vftintrm_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrm_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrne_w_d(a: v2f64, b: v2f64) -> v4i32 {
-    unsafe { __lsx_vftintrne_w_d(a, b) }
+pub fn lsx_vftintrne_w_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vftintrne_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintl_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintl_l_s(a) }
+pub fn lsx_vftintl_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftinth_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftinth_l_s(a) }
+pub fn lsx_vftinth_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftinth_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffinth_d_w(a: v4i32) -> v2f64 {
-    unsafe { __lsx_vffinth_d_w(a) }
+pub fn lsx_vffinth_d_w(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffinth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vffintl_d_w(a: v4i32) -> v2f64 {
-    unsafe { __lsx_vffintl_d_w(a) }
+pub fn lsx_vffintl_d_w(a: m128i) -> m128d {
+    unsafe { transmute(__lsx_vffintl_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrzl_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrzl_l_s(a) }
+pub fn lsx_vftintrzl_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrzl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrzh_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrzh_l_s(a) }
+pub fn lsx_vftintrzh_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrzh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrpl_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrpl_l_s(a) }
+pub fn lsx_vftintrpl_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrpl_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrph_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrph_l_s(a) }
+pub fn lsx_vftintrph_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrph_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrml_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrml_l_s(a) }
+pub fn lsx_vftintrml_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrml_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrmh_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrmh_l_s(a) }
+pub fn lsx_vftintrmh_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrmh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrnel_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrnel_l_s(a) }
+pub fn lsx_vftintrnel_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrnel_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vftintrneh_l_s(a: v4f32) -> v2i64 {
-    unsafe { __lsx_vftintrneh_l_s(a) }
+pub fn lsx_vftintrneh_l_s(a: m128) -> m128i {
+    unsafe { transmute(__lsx_vftintrneh_l_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrne_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrne_s(a) }
+pub fn lsx_vfrintrne_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrne_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrne_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrne_d(a) }
+pub fn lsx_vfrintrne_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrne_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrz_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrz_s(a) }
+pub fn lsx_vfrintrz_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrz_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrz_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrz_d(a) }
+pub fn lsx_vfrintrz_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrp_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrp_s(a) }
+pub fn lsx_vfrintrp_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrp_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrp_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrp_d(a) }
+pub fn lsx_vfrintrp_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrp_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrm_s(a: v4f32) -> v4f32 {
-    unsafe { __lsx_vfrintrm_s(a) }
+pub fn lsx_vfrintrm_s(a: m128) -> m128 {
+    unsafe { transmute(__lsx_vfrintrm_s(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfrintrm_d(a: v2f64) -> v2f64 {
-    unsafe { __lsx_vfrintrm_d(a) }
+pub fn lsx_vfrintrm_d(a: m128d) -> m128d {
+    unsafe { transmute(__lsx_vfrintrm_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_b<const IMM_S8: i32, const IMM4: u32>(a: v16i8, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_b<const IMM_S8: i32, const IMM4: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM4, 4);
-    __lsx_vstelm_b(a, mem_addr, IMM_S8, IMM4)
+    transmute(__lsx_vstelm_b(transmute(a), mem_addr, IMM_S8, IMM4))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_h<const IMM_S8: i32, const IMM3: u32>(a: v8i16, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_h<const IMM_S8: i32, const IMM3: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM3, 3);
-    __lsx_vstelm_h(a, mem_addr, IMM_S8, IMM3)
+    transmute(__lsx_vstelm_h(transmute(a), mem_addr, IMM_S8, IMM3))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_w<const IMM_S8: i32, const IMM2: u32>(a: v4i32, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_w<const IMM_S8: i32, const IMM2: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM2, 2);
-    __lsx_vstelm_w(a, mem_addr, IMM_S8, IMM2)
+    transmute(__lsx_vstelm_w(transmute(a), mem_addr, IMM_S8, IMM2))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstelm_d<const IMM_S8: i32, const IMM1: u32>(a: v2i64, mem_addr: *mut i8) {
+pub unsafe fn lsx_vstelm_d<const IMM_S8: i32, const IMM1: u32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S8, 8);
     static_assert_uimm_bits!(IMM1, 1);
-    __lsx_vstelm_d(a, mem_addr, IMM_S8, IMM1)
+    transmute(__lsx_vstelm_d(transmute(a), mem_addr, IMM_S8, IMM1))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwev_d_w(a, b) }
+pub fn lsx_vaddwev_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwev_w_h(a, b) }
+pub fn lsx_vaddwev_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwev_h_b(a, b) }
+pub fn lsx_vaddwev_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwod_d_w(a, b) }
+pub fn lsx_vaddwod_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwod_w_h(a, b) }
+pub fn lsx_vaddwod_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwod_h_b(a, b) }
+pub fn lsx_vaddwod_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vaddwev_d_wu(a, b) }
+pub fn lsx_vaddwev_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vaddwev_w_hu(a, b) }
+pub fn lsx_vaddwev_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vaddwev_h_bu(a, b) }
+pub fn lsx_vaddwev_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vaddwod_d_wu(a, b) }
+pub fn lsx_vaddwod_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vaddwod_w_hu(a, b) }
+pub fn lsx_vaddwod_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vaddwod_h_bu(a, b) }
+pub fn lsx_vaddwod_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwev_d_wu_w(a, b) }
+pub fn lsx_vaddwev_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwev_w_hu_h(a, b) }
+pub fn lsx_vaddwev_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwev_h_bu_b(a, b) }
+pub fn lsx_vaddwev_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vaddwod_d_wu_w(a, b) }
+pub fn lsx_vaddwod_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vaddwod_w_hu_h(a, b) }
+pub fn lsx_vaddwod_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vaddwod_h_bu_b(a, b) }
+pub fn lsx_vaddwod_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vsubwev_d_w(a, b) }
+pub fn lsx_vsubwev_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vsubwev_w_h(a, b) }
+pub fn lsx_vsubwev_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vsubwev_h_b(a, b) }
+pub fn lsx_vsubwev_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vsubwod_d_w(a, b) }
+pub fn lsx_vsubwod_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vsubwod_w_h(a, b) }
+pub fn lsx_vsubwod_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vsubwod_h_b(a, b) }
+pub fn lsx_vsubwod_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vsubwev_d_wu(a, b) }
+pub fn lsx_vsubwev_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vsubwev_w_hu(a, b) }
+pub fn lsx_vsubwev_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vsubwev_h_bu(a, b) }
+pub fn lsx_vsubwev_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vsubwod_d_wu(a, b) }
+pub fn lsx_vsubwod_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vsubwod_w_hu(a, b) }
+pub fn lsx_vsubwod_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vsubwod_h_bu(a, b) }
+pub fn lsx_vsubwod_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwev_q_d(a, b) }
+pub fn lsx_vaddwev_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwod_q_d(a, b) }
+pub fn lsx_vaddwod_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vaddwev_q_du(a, b) }
+pub fn lsx_vaddwev_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vaddwod_q_du(a, b) }
+pub fn lsx_vaddwod_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsubwev_q_d(a, b) }
+pub fn lsx_vsubwev_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsubwod_q_d(a, b) }
+pub fn lsx_vsubwod_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwev_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vsubwev_q_du(a, b) }
+pub fn lsx_vsubwev_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsubwod_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vsubwod_q_du(a, b) }
+pub fn lsx_vsubwod_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsubwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwev_q_du_d(a, b) }
+pub fn lsx_vaddwev_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vaddwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vaddwod_q_du_d(a, b) }
+pub fn lsx_vaddwod_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vaddwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwev_d_w(a, b) }
+pub fn lsx_vmulwev_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwev_w_h(a, b) }
+pub fn lsx_vmulwev_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwev_h_b(a, b) }
+pub fn lsx_vmulwev_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_d_w(a: v4i32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwod_d_w(a, b) }
+pub fn lsx_vmulwod_d_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_d_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_w_h(a: v8i16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwod_w_h(a, b) }
+pub fn lsx_vmulwod_w_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_w_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_h_b(a: v16i8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwod_h_b(a, b) }
+pub fn lsx_vmulwod_h_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_h_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vmulwev_d_wu(a, b) }
+pub fn lsx_vmulwev_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vmulwev_w_hu(a, b) }
+pub fn lsx_vmulwev_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vmulwev_h_bu(a, b) }
+pub fn lsx_vmulwev_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_d_wu(a: v4u32, b: v4u32) -> v2i64 {
-    unsafe { __lsx_vmulwod_d_wu(a, b) }
+pub fn lsx_vmulwod_d_wu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_d_wu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_w_hu(a: v8u16, b: v8u16) -> v4i32 {
-    unsafe { __lsx_vmulwod_w_hu(a, b) }
+pub fn lsx_vmulwod_w_hu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_w_hu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_h_bu(a: v16u8, b: v16u8) -> v8i16 {
-    unsafe { __lsx_vmulwod_h_bu(a, b) }
+pub fn lsx_vmulwod_h_bu(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_h_bu(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwev_d_wu_w(a, b) }
+pub fn lsx_vmulwev_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwev_w_hu_h(a, b) }
+pub fn lsx_vmulwev_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwev_h_bu_b(a, b) }
+pub fn lsx_vmulwev_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_d_wu_w(a: v4u32, b: v4i32) -> v2i64 {
-    unsafe { __lsx_vmulwod_d_wu_w(a, b) }
+pub fn lsx_vmulwod_d_wu_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_d_wu_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_w_hu_h(a: v8u16, b: v8i16) -> v4i32 {
-    unsafe { __lsx_vmulwod_w_hu_h(a, b) }
+pub fn lsx_vmulwod_w_hu_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_w_hu_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_h_bu_b(a: v16u8, b: v16i8) -> v8i16 {
-    unsafe { __lsx_vmulwod_h_bu_b(a, b) }
+pub fn lsx_vmulwod_h_bu_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_h_bu_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwev_q_d(a, b) }
+pub fn lsx_vmulwev_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwod_q_d(a, b) }
+pub fn lsx_vmulwod_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vmulwev_q_du(a, b) }
+pub fn lsx_vmulwev_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_q_du(a: v2u64, b: v2u64) -> v2i64 {
-    unsafe { __lsx_vmulwod_q_du(a, b) }
+pub fn lsx_vmulwod_q_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_q_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwev_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwev_q_du_d(a, b) }
+pub fn lsx_vmulwev_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwev_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmulwod_q_du_d(a: v2u64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vmulwod_q_du_d(a, b) }
+pub fn lsx_vmulwod_q_du_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmulwod_q_du_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vhaddw_q_d(a, b) }
+pub fn lsx_vhaddw_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhaddw_qu_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vhaddw_qu_du(a, b) }
+pub fn lsx_vhaddw_qu_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhaddw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_q_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vhsubw_q_d(a, b) }
+pub fn lsx_vhsubw_q_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_q_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vhsubw_qu_du(a: v2u64, b: v2u64) -> v2u64 {
-    unsafe { __lsx_vhsubw_qu_du(a, b) }
+pub fn lsx_vhsubw_qu_du(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vhsubw_qu_du(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwev_d_w(a, b, c) }
+pub fn lsx_vmaddwev_d_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwev_w_h(a, b, c) }
+pub fn lsx_vmaddwev_w_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwev_h_b(a, b, c) }
+pub fn lsx_vmaddwev_h_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 {
-    unsafe { __lsx_vmaddwev_d_wu(a, b, c) }
+pub fn lsx_vmaddwev_d_wu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 {
-    unsafe { __lsx_vmaddwev_w_hu(a, b, c) }
+pub fn lsx_vmaddwev_w_hu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 {
-    unsafe { __lsx_vmaddwev_h_bu(a, b, c) }
+pub fn lsx_vmaddwev_h_bu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_d_w(a: v2i64, b: v4i32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwod_d_w(a, b, c) }
+pub fn lsx_vmaddwod_d_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_d_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_w_h(a: v4i32, b: v8i16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwod_w_h(a, b, c) }
+pub fn lsx_vmaddwod_w_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_w_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_h_b(a: v8i16, b: v16i8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwod_h_b(a, b, c) }
+pub fn lsx_vmaddwod_h_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_h_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_d_wu(a: v2u64, b: v4u32, c: v4u32) -> v2u64 {
-    unsafe { __lsx_vmaddwod_d_wu(a, b, c) }
+pub fn lsx_vmaddwod_d_wu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_w_hu(a: v4u32, b: v8u16, c: v8u16) -> v4u32 {
-    unsafe { __lsx_vmaddwod_w_hu(a, b, c) }
+pub fn lsx_vmaddwod_w_hu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_h_bu(a: v8u16, b: v16u8, c: v16u8) -> v8u16 {
-    unsafe { __lsx_vmaddwod_h_bu(a, b, c) }
+pub fn lsx_vmaddwod_h_bu(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwev_d_wu_w(a, b, c) }
+pub fn lsx_vmaddwev_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwev_w_hu_h(a, b, c) }
+pub fn lsx_vmaddwev_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwev_h_bu_b(a, b, c) }
+pub fn lsx_vmaddwev_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_d_wu_w(a: v2i64, b: v4u32, c: v4i32) -> v2i64 {
-    unsafe { __lsx_vmaddwod_d_wu_w(a, b, c) }
+pub fn lsx_vmaddwod_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_w_hu_h(a: v4i32, b: v8u16, c: v8i16) -> v4i32 {
-    unsafe { __lsx_vmaddwod_w_hu_h(a, b, c) }
+pub fn lsx_vmaddwod_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_h_bu_b(a: v8i16, b: v16u8, c: v16i8) -> v8i16 {
-    unsafe { __lsx_vmaddwod_h_bu_b(a, b, c) }
+pub fn lsx_vmaddwod_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwev_q_d(a, b, c) }
+pub fn lsx_vmaddwev_q_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_q_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwod_q_d(a, b, c) }
+pub fn lsx_vmaddwod_q_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_q_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
-    unsafe { __lsx_vmaddwev_q_du(a, b, c) }
+pub fn lsx_vmaddwev_q_du(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_q_du(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
-    unsafe { __lsx_vmaddwod_q_du(a, b, c) }
+pub fn lsx_vmaddwod_q_du(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_q_du(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwev_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwev_q_du_d(a, b, c) }
+pub fn lsx_vmaddwev_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmaddwod_q_du_d(a: v2i64, b: v2u64, c: v2i64) -> v2i64 {
-    unsafe { __lsx_vmaddwod_q_du_d(a, b, c) }
+pub fn lsx_vmaddwod_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_b(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vrotr_b(a, b) }
+pub fn lsx_vrotr_b(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_b(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_h(a: v8i16, b: v8i16) -> v8i16 {
-    unsafe { __lsx_vrotr_h(a, b) }
+pub fn lsx_vrotr_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_w(a: v4i32, b: v4i32) -> v4i32 {
-    unsafe { __lsx_vrotr_w(a, b) }
+pub fn lsx_vrotr_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotr_d(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vrotr_d(a, b) }
+pub fn lsx_vrotr_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vrotr_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vadd_q(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vadd_q(a, b) }
+pub fn lsx_vadd_q(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vadd_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsub_q(a: v2i64, b: v2i64) -> v2i64 {
-    unsafe { __lsx_vsub_q(a, b) }
+pub fn lsx_vsub_q(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vsub_q(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> v16i8 {
+pub unsafe fn lsx_vldrepl_b<const IMM_S12: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lsx_vldrepl_b(mem_addr, IMM_S12)
+    transmute(__lsx_vldrepl_b(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> v8i16 {
+pub unsafe fn lsx_vldrepl_h<const IMM_S11: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S11, 11);
-    __lsx_vldrepl_h(mem_addr, IMM_S11)
+    transmute(__lsx_vldrepl_h(mem_addr, IMM_S11))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> v4i32 {
+pub unsafe fn lsx_vldrepl_w<const IMM_S10: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    __lsx_vldrepl_w(mem_addr, IMM_S10)
+    transmute(__lsx_vldrepl_w(mem_addr, IMM_S10))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> v2i64 {
+pub unsafe fn lsx_vldrepl_d<const IMM_S9: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S9, 9);
-    __lsx_vldrepl_d(mem_addr, IMM_S9)
+    transmute(__lsx_vldrepl_d(mem_addr, IMM_S9))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmskgez_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vmskgez_b(a) }
+pub fn lsx_vmskgez_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmskgez_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vmsknz_b(a: v16i8) -> v16i8 {
-    unsafe { __lsx_vmsknz_b(a) }
+pub fn lsx_vmsknz_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vmsknz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_h_b(a: v16i8) -> v8i16 {
-    unsafe { __lsx_vexth_h_b(a) }
+pub fn lsx_vexth_h_b(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_h_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_w_h(a: v8i16) -> v4i32 {
-    unsafe { __lsx_vexth_w_h(a) }
+pub fn lsx_vexth_w_h(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_w_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_d_w(a: v4i32) -> v2i64 {
-    unsafe { __lsx_vexth_d_w(a) }
+pub fn lsx_vexth_d_w(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_d_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_q_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vexth_q_d(a) }
+pub fn lsx_vexth_q_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_hu_bu(a: v16u8) -> v8u16 {
-    unsafe { __lsx_vexth_hu_bu(a) }
+pub fn lsx_vexth_hu_bu(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_hu_bu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_wu_hu(a: v8u16) -> v4u32 {
-    unsafe { __lsx_vexth_wu_hu(a) }
+pub fn lsx_vexth_wu_hu(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_wu_hu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_du_wu(a: v4u32) -> v2u64 {
-    unsafe { __lsx_vexth_du_wu(a) }
+pub fn lsx_vexth_du_wu(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_du_wu(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vexth_qu_du(a: v2u64) -> v2u64 {
-    unsafe { __lsx_vexth_qu_du(a) }
+pub fn lsx_vexth_qu_du(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vexth_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_b<const IMM3: u32>(a: v16i8) -> v16i8 {
+pub fn lsx_vrotri_b<const IMM3: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM3, 3);
-    unsafe { __lsx_vrotri_b(a, IMM3) }
+    unsafe { transmute(__lsx_vrotri_b(transmute(a), IMM3)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_h<const IMM4: u32>(a: v8i16) -> v8i16 {
+pub fn lsx_vrotri_h<const IMM4: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vrotri_h(a, IMM4) }
+    unsafe { transmute(__lsx_vrotri_h(transmute(a), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_w<const IMM5: u32>(a: v4i32) -> v4i32 {
+pub fn lsx_vrotri_w<const IMM5: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vrotri_w(a, IMM5) }
+    unsafe { transmute(__lsx_vrotri_w(transmute(a), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrotri_d<const IMM6: u32>(a: v2i64) -> v2i64 {
+pub fn lsx_vrotri_d<const IMM6: u32>(a: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vrotri_d(a, IMM6) }
+    unsafe { transmute(__lsx_vrotri_d(transmute(a), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextl_q_d(a: v2i64) -> v2i64 {
-    unsafe { __lsx_vextl_q_d(a) }
+pub fn lsx_vextl_q_d(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vextl_q_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrlni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrlni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrlni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrlni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrlrni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrlrni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrlrni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrlrni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrlrni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrlni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrlni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrlni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrlni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrlni_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrlni_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrlni_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrlni_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrlrni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlrni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlrni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrlrni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlrni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlrni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrlrni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlrni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlrni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrlrni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlrni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlrni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrlrni_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrlrni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrlrni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrlrni_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrlrni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrlrni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrlrni_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrlrni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrlrni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrlrni_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrlrni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrlrni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrani_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrani_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrani_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrani_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrani_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vsrarni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vsrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vsrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vsrarni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vsrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vsrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vsrarni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vsrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vsrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vsrarni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vsrarni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vsrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vsrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrani_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrani_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrani_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrani_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrani_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrani_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrani_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrani_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrani_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrani_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrani_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrani_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrani_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrani_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrani_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrani_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrani_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrani_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrani_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrani_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrani_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrani_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrani_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrani_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrani_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_b_h<const IMM4: u32>(a: v16i8, b: v16i8) -> v16i8 {
+pub fn lsx_vssrarni_b_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrarni_b_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrarni_b_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_h_w<const IMM5: u32>(a: v8i16, b: v8i16) -> v8i16 {
+pub fn lsx_vssrarni_h_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrarni_h_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrarni_h_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_w_d<const IMM6: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vssrarni_w_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrarni_w_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrarni_w_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_d_q<const IMM7: u32>(a: v2i64, b: v2i64) -> v2i64 {
+pub fn lsx_vssrarni_d_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrarni_d_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrarni_d_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_bu_h<const IMM4: u32>(a: v16u8, b: v16i8) -> v16u8 {
+pub fn lsx_vssrarni_bu_h<const IMM4: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM4, 4);
-    unsafe { __lsx_vssrarni_bu_h(a, b, IMM4) }
+    unsafe { transmute(__lsx_vssrarni_bu_h(transmute(a), transmute(b), IMM4)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_hu_w<const IMM5: u32>(a: v8u16, b: v8i16) -> v8u16 {
+pub fn lsx_vssrarni_hu_w<const IMM5: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM5, 5);
-    unsafe { __lsx_vssrarni_hu_w(a, b, IMM5) }
+    unsafe { transmute(__lsx_vssrarni_hu_w(transmute(a), transmute(b), IMM5)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_wu_d<const IMM6: u32>(a: v4u32, b: v4i32) -> v4u32 {
+pub fn lsx_vssrarni_wu_d<const IMM6: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM6, 6);
-    unsafe { __lsx_vssrarni_wu_d(a, b, IMM6) }
+    unsafe { transmute(__lsx_vssrarni_wu_d(transmute(a), transmute(b), IMM6)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrarni_du_q<const IMM7: u32>(a: v2u64, b: v2i64) -> v2u64 {
+pub fn lsx_vssrarni_du_q<const IMM7: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM7, 7);
-    unsafe { __lsx_vssrarni_du_q(a, b, IMM7) }
+    unsafe { transmute(__lsx_vssrarni_du_q(transmute(a), transmute(b), IMM7)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vpermi_w<const IMM8: u32>(a: v4i32, b: v4i32) -> v4i32 {
+pub fn lsx_vpermi_w<const IMM8: u32>(a: m128i, b: m128i) -> m128i {
     static_assert_uimm_bits!(IMM8, 8);
-    unsafe { __lsx_vpermi_w(a, b, IMM8) }
+    unsafe { transmute(__lsx_vpermi_w(transmute(a), transmute(b), IMM8)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(1)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vld<const IMM_S12: i32>(mem_addr: *const i8) -> v16i8 {
+pub unsafe fn lsx_vld<const IMM_S12: i32>(mem_addr: *const i8) -> m128i {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lsx_vld(mem_addr, IMM_S12)
+    transmute(__lsx_vld(mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vst<const IMM_S12: i32>(a: v16i8, mem_addr: *mut i8) {
+pub unsafe fn lsx_vst<const IMM_S12: i32>(a: m128i, mem_addr: *mut i8) {
     static_assert_simm_bits!(IMM_S12, 12);
-    __lsx_vst(a, mem_addr, IMM_S12)
+    transmute(__lsx_vst(transmute(a), mem_addr, IMM_S12))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssrlrn_b_h(a, b) }
+pub fn lsx_vssrlrn_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssrlrn_h_w(a, b) }
+pub fn lsx_vssrlrn_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrlrn_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssrlrn_w_d(a, b) }
+pub fn lsx_vssrlrn_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrlrn_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_b_h(a: v8i16, b: v8i16) -> v16i8 {
-    unsafe { __lsx_vssrln_b_h(a, b) }
+pub fn lsx_vssrln_b_h(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_b_h(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_h_w(a: v4i32, b: v4i32) -> v8i16 {
-    unsafe { __lsx_vssrln_h_w(a, b) }
+pub fn lsx_vssrln_h_w(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_h_w(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vssrln_w_d(a: v2i64, b: v2i64) -> v4i32 {
-    unsafe { __lsx_vssrln_w_d(a, b) }
+pub fn lsx_vssrln_w_d(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vssrln_w_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vorn_v(a: v16i8, b: v16i8) -> v16i8 {
-    unsafe { __lsx_vorn_v(a, b) }
+pub fn lsx_vorn_v(a: m128i, b: m128i) -> m128i {
+    unsafe { transmute(__lsx_vorn_v(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vldi<const IMM_S13: i32>() -> v2i64 {
+pub fn lsx_vldi<const IMM_S13: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S13, 13);
-    unsafe { __lsx_vldi(IMM_S13) }
+    unsafe { transmute(__lsx_vldi(IMM_S13)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vshuf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 {
-    unsafe { __lsx_vshuf_b(a, b, c) }
+pub fn lsx_vshuf_b(a: m128i, b: m128i, c: m128i) -> m128i {
+    unsafe { transmute(__lsx_vshuf_b(transmute(a), transmute(b), transmute(c))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> v16i8 {
-    __lsx_vldx(mem_addr, b)
+pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> m128i {
+    transmute(__lsx_vldx(mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub unsafe fn lsx_vstx(a: v16i8, mem_addr: *mut i8, b: i64) {
-    __lsx_vstx(a, mem_addr, b)
+pub unsafe fn lsx_vstx(a: m128i, mem_addr: *mut i8, b: i64) {
+    transmute(__lsx_vstx(transmute(a), mem_addr, transmute(b)))
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vextl_qu_du(a: v2u64) -> v2u64 {
-    unsafe { __lsx_vextl_qu_du(a) }
+pub fn lsx_vextl_qu_du(a: m128i) -> m128i {
+    unsafe { transmute(__lsx_vextl_qu_du(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_b(a: v16u8) -> i32 {
-    unsafe { __lsx_bnz_b(a) }
+pub fn lsx_bnz_b(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_d(a: v2u64) -> i32 {
-    unsafe { __lsx_bnz_d(a) }
+pub fn lsx_bnz_d(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_h(a: v8u16) -> i32 {
-    unsafe { __lsx_bnz_h(a) }
+pub fn lsx_bnz_h(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_v(a: v16u8) -> i32 {
-    unsafe { __lsx_bnz_v(a) }
+pub fn lsx_bnz_v(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bnz_w(a: v4u32) -> i32 {
-    unsafe { __lsx_bnz_w(a) }
+pub fn lsx_bnz_w(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bnz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_b(a: v16u8) -> i32 {
-    unsafe { __lsx_bz_b(a) }
+pub fn lsx_bz_b(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_b(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_d(a: v2u64) -> i32 {
-    unsafe { __lsx_bz_d(a) }
+pub fn lsx_bz_d(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_d(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_h(a: v8u16) -> i32 {
-    unsafe { __lsx_bz_h(a) }
+pub fn lsx_bz_h(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_h(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_v(a: v16u8) -> i32 {
-    unsafe { __lsx_bz_v(a) }
+pub fn lsx_bz_v(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_v(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_bz_w(a: v4u32) -> i32 {
-    unsafe { __lsx_bz_w(a) }
+pub fn lsx_bz_w(a: m128i) -> i32 {
+    unsafe { transmute(__lsx_bz_w(transmute(a))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_caf_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_caf_d(a, b) }
+pub fn lsx_vfcmp_caf_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_caf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_caf_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_caf_s(a, b) }
+pub fn lsx_vfcmp_caf_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_caf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_ceq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_ceq_d(a, b) }
+pub fn lsx_vfcmp_ceq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_ceq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_ceq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_ceq_s(a, b) }
+pub fn lsx_vfcmp_ceq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_ceq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cle_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cle_d(a, b) }
+pub fn lsx_vfcmp_cle_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cle_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cle_s(a, b) }
+pub fn lsx_vfcmp_cle_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_clt_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_clt_d(a, b) }
+pub fn lsx_vfcmp_clt_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_clt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_clt_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_clt_s(a, b) }
+pub fn lsx_vfcmp_clt_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_clt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cne_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cne_d(a, b) }
+pub fn lsx_vfcmp_cne_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cne_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cne_s(a, b) }
+pub fn lsx_vfcmp_cne_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cor_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cor_d(a, b) }
+pub fn lsx_vfcmp_cor_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cor_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cor_s(a, b) }
+pub fn lsx_vfcmp_cor_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cueq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cueq_d(a, b) }
+pub fn lsx_vfcmp_cueq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cueq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cueq_s(a, b) }
+pub fn lsx_vfcmp_cueq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cule_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cule_d(a, b) }
+pub fn lsx_vfcmp_cule_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cule_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cule_s(a, b) }
+pub fn lsx_vfcmp_cule_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cult_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cult_d(a, b) }
+pub fn lsx_vfcmp_cult_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cult_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cult_s(a, b) }
+pub fn lsx_vfcmp_cult_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cun_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cun_d(a, b) }
+pub fn lsx_vfcmp_cun_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cune_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_cune_d(a, b) }
+pub fn lsx_vfcmp_cune_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cune_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cune_s(a, b) }
+pub fn lsx_vfcmp_cune_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_cun_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_cun_s(a, b) }
+pub fn lsx_vfcmp_cun_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_cun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_saf_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_saf_d(a, b) }
+pub fn lsx_vfcmp_saf_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_saf_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_saf_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_saf_s(a, b) }
+pub fn lsx_vfcmp_saf_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_saf_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_seq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_seq_d(a, b) }
+pub fn lsx_vfcmp_seq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_seq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_seq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_seq_s(a, b) }
+pub fn lsx_vfcmp_seq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_seq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sle_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sle_d(a, b) }
+pub fn lsx_vfcmp_sle_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sle_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sle_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sle_s(a, b) }
+pub fn lsx_vfcmp_sle_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sle_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_slt_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_slt_d(a, b) }
+pub fn lsx_vfcmp_slt_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_slt_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_slt_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_slt_s(a, b) }
+pub fn lsx_vfcmp_slt_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_slt_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sne_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sne_d(a, b) }
+pub fn lsx_vfcmp_sne_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sne_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sne_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sne_s(a, b) }
+pub fn lsx_vfcmp_sne_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sne_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sor_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sor_d(a, b) }
+pub fn lsx_vfcmp_sor_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sor_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sor_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sor_s(a, b) }
+pub fn lsx_vfcmp_sor_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sor_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sueq_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sueq_d(a, b) }
+pub fn lsx_vfcmp_sueq_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sueq_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sueq_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sueq_s(a, b) }
+pub fn lsx_vfcmp_sueq_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sueq_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sule_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sule_d(a, b) }
+pub fn lsx_vfcmp_sule_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sule_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sule_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sule_s(a, b) }
+pub fn lsx_vfcmp_sule_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sule_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sult_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sult_d(a, b) }
+pub fn lsx_vfcmp_sult_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sult_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sult_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sult_s(a, b) }
+pub fn lsx_vfcmp_sult_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sult_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sun_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sun_d(a, b) }
+pub fn lsx_vfcmp_sun_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sun_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sune_d(a: v2f64, b: v2f64) -> v2i64 {
-    unsafe { __lsx_vfcmp_sune_d(a, b) }
+pub fn lsx_vfcmp_sune_d(a: m128d, b: m128d) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sune_d(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sune_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sune_s(a, b) }
+pub fn lsx_vfcmp_sune_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sune_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vfcmp_sun_s(a: v4f32, b: v4f32) -> v4i32 {
-    unsafe { __lsx_vfcmp_sun_s(a, b) }
+pub fn lsx_vfcmp_sun_s(a: m128, b: m128) -> m128i {
+    unsafe { transmute(__lsx_vfcmp_sun_s(transmute(a), transmute(b))) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_b<const IMM_S10: i32>() -> v16i8 {
+pub fn lsx_vrepli_b<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_b(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_b(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_d<const IMM_S10: i32>() -> v2i64 {
+pub fn lsx_vrepli_d<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_d(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_d(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_h<const IMM_S10: i32>() -> v8i16 {
+pub fn lsx_vrepli_h<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_h(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_h(IMM_S10)) }
 }
 
 #[inline]
 #[target_feature(enable = "lsx")]
 #[rustc_legacy_const_generics(0)]
 #[unstable(feature = "stdarch_loongarch", issue = "117427")]
-pub fn lsx_vrepli_w<const IMM_S10: i32>() -> v4i32 {
+pub fn lsx_vrepli_w<const IMM_S10: i32>() -> m128i {
     static_assert_simm_bits!(IMM_S10, 10);
-    unsafe { __lsx_vrepli_w(IMM_S10) }
+    unsafe { transmute(__lsx_vrepli_w(IMM_S10)) }
 }
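The pattern repeated in every wrapper above is the same: accept the new public `m128`/`m128d`/`m128i` types, `transmute` into the crate-private `__v*` vector types that the builtins expect, and `transmute` the result back. Here is a minimal standalone sketch of that shape; it uses plain `#[repr(transparent)]` stand-ins and an invented `__builtin_sule` helper rather than the real `repr(simd)` types and `__lsx_*` externs.

```rust
use core::mem::transmute;

// Plain-struct stand-ins (not real SIMD types), used only to illustrate the shape.
#[repr(transparent)]
pub struct M128d(pub [f64; 2]);
#[repr(transparent)]
pub struct M128i(pub [i64; 2]);
#[repr(transparent)]
struct V2f64([f64; 2]);
#[repr(transparent)]
struct V2i64([i64; 2]);

// Hypothetical builtin standing in for `__lsx_vfcmp_sule_d`: all-ones lane on "<=".
unsafe fn __builtin_sule(a: V2f64, b: V2f64) -> V2i64 {
    V2i64([
        -((a.0[0] <= b.0[0]) as i64),
        -((a.0[1] <= b.0[1]) as i64),
    ])
}

// Public wrapper: transmute the opaque types into the builtin's view and back,
// mirroring the `lsx_vfcmp_*` wrappers in the diff above.
pub fn cmp_sule(a: M128d, b: M128d) -> M128i {
    unsafe { transmute(__builtin_sule(transmute(a), transmute(b))) }
}

fn main() {
    let r = cmp_sule(M128d([1.0, 4.0]), M128d([2.0, 3.0]));
    assert_eq!(r.0, [-1, 0]); // lane 0: 1.0 <= 2.0; lane 1: 4.0 > 3.0
}
```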
diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs
index 4097164c2fa..4fb69457174 100644
--- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs
+++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/types.rs
@@ -1,33 +1,140 @@
 types! {
     #![unstable(feature = "stdarch_loongarch", issue = "117427")]
 
-    /// LOONGARCH-specific 128-bit wide vector of 16 packed `i8`.
-    pub struct v16i8(16 x pub(crate) i8);
+    /// 128-bit wide integer vector type, LoongArch-specific
+    ///
+    /// This type is the same as the `__m128i` type defined in `lsxintrin.h`,
+    /// representing a 128-bit SIMD register. Usage of this type typically
+    /// occurs in conjunction with the `lsx` and higher target features for
+    /// LoongArch.
+    ///
+    /// Internally this type may be viewed as:
+    ///
+    /// * `i8x16` - sixteen `i8` values packed together
+    /// * `i16x8` - eight `i16` values packed together
+    /// * `i32x4` - four `i32` values packed together
+    /// * `i64x2` - two `i64` values packed together
+    ///
+    /// (as well as unsigned versions). Each intrinsic may interpret the
+    /// internal bits differently, check the documentation of the intrinsic
+    /// to see how it's being used.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Note that this means an instance of `m128i` is typically just a "bag of
+    /// bits" whose interpretation is left to the point of use.
+    ///
+    /// Most intrinsics using `m128i` are prefixed with `lsx_` and the integer
+    /// types tend to correspond to suffixes like "b", "h", "w" or "d".
+    pub struct m128i(2 x i64);
 
-    /// LOONGARCH-specific 128-bit wide vector of 8 packed `i16`.
-    pub struct v8i16(8 x pub(crate) i16);
+    /// 128-bit wide set of four `f32` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m128` type defined in `lsxintrin.h`,
+    /// representing a 128-bit SIMD register which internally consists of
+    /// four packed `f32` instances. Usage of this type typically occurs in
+    /// conjunction with the `lsx` and higher target features for LoongArch.
+    ///
+    /// Note that unlike `m128i`, the integer version of the 128-bit registers,
+    /// this `m128` type has *one* interpretation. Each instance of `m128`
+    /// corresponds to `f32x4`, or four `f32` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Most intrinsics using `m128` are prefixed with `lsx_` and are suffixed
+    /// with "s".
+    pub struct m128(4 x f32);
 
-    /// LOONGARCH-specific 128-bit wide vector of 4 packed `i32`.
-    pub struct v4i32(4 x pub(crate) i32);
-
-    /// LOONGARCH-specific 128-bit wide vector of 2 packed `i64`.
-    pub struct v2i64(2 x pub(crate) i64);
-
-    /// LOONGARCH-specific 128-bit wide vector of 16 packed `u8`.
-    pub struct v16u8(16 x pub(crate) u8);
-
-    /// LOONGARCH-specific 128-bit wide vector of 8 packed `u16`.
-    pub struct v8u16(8 x pub(crate) u16);
-
-    /// LOONGARCH-specific 128-bit wide vector of 4 packed `u32`.
-    pub struct v4u32(4 x pub(crate) u32);
-
-    /// LOONGARCH-specific 128-bit wide vector of 2 packed `u64`.
-    pub struct v2u64(2 x pub(crate) u64);
+    /// 128-bit wide set of two `f64` values, LoongArch-specific
+    ///
+    /// This type is the same as the `__m128d` type defined in `lsxintrin.h`,
+    /// representing a 128-bit SIMD register which internally consists of
+    /// two packed `f64` instances. Usage of this type typically occurs in
+    /// conjunction with the `lsx` and higher target features for LoongArch.
+    ///
+    /// Note that unlike `m128i`, the integer version of the 128-bit registers,
+    /// this `m128d` type has *one* interpretation. Each instance of `m128d`
+    /// always corresponds to `f64x2`, or two `f64` values packed together.
+    ///
+    /// The in-memory representation of this type is the same as the one of an
+    /// equivalent array (i.e. the in-memory order of elements is the same, and
+    /// there is no padding); however, the alignment is different and equal to
+    /// the size of the type. Note that the ABI for function calls may *not* be
+    /// the same.
+    ///
+    /// Most intrinsics using `m128d` are prefixed with `lsx_` and suffixed with "d";
+    /// not to be confused with the "d" suffix on `m128i` intrinsics (64-bit lanes).
+    pub struct m128d(2 x f64);
+}
 
-    /// LOONGARCH-specific 128-bit wide vector of 4 packed `f32`.
-    pub struct v4f32(4 x pub(crate) f32);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16i8([i8; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8i16([i16; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4i32([i32; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v2i64([i64; 2]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v16u8([u8; 16]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v8u16([u16; 8]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4u32([u32; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v2u64([u64; 2]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v4f32([f32; 4]);
+#[allow(non_camel_case_types)]
+#[repr(simd)]
+pub(crate) struct __v2f64([f64; 2]);
 
-    /// LOONGARCH-specific 128-bit wide vector of 2 packed `f64`.
-    pub struct v2f64(2 x pub(crate) f64);
-}
+// These type aliases are provided solely for transitional compatibility.
+// They are temporary and will be removed when appropriate.
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16i8 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8i16 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4i32 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v2i64 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v16u8 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v8u16 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4u32 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v2u64 = m128i;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v4f32 = m128;
+#[allow(non_camel_case_types)]
+#[unstable(feature = "stdarch_loongarch", issue = "117427")]
+pub type v2f64 = m128d;
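As the `m128i` documentation above stresses, the type is a 128-bit "bag of bits" whose in-memory layout matches an equivalent array; which lane width applies is decided per intrinsic. The following hedged sketch of that idea uses a plain 16-byte array in place of the real `repr(simd)` type and assumes a little-endian target (as LoongArch is).

```rust
fn main() {
    // 16 bytes standing in for one 128-bit register value.
    let bits: [u8; 16] = [0xFF, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0];

    // Viewed as two i64 lanes (the "d" element size in LSX terms)...
    let as_i64: [i64; 2] = unsafe { core::mem::transmute(bits) };
    // ...or as four i32 lanes (the "w" element size); the intrinsic picks the view.
    let as_i32: [i32; 4] = unsafe { core::mem::transmute(bits) };

    assert_eq!(as_i64, [0xFF, 1]);
    assert_eq!(as_i32, [0xFF, 0, 1, 0]);
}
```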
diff --git a/library/stdarch/crates/core_arch/src/s390x/vector.rs b/library/stdarch/crates/core_arch/src/s390x/vector.rs
index a09a27a029c..0ce720a9244 100644
--- a/library/stdarch/crates/core_arch/src/s390x/vector.rs
+++ b/library/stdarch/crates/core_arch/src/s390x/vector.rs
@@ -5831,24 +5831,30 @@ mod tests {
     use crate::core_arch::simd::*;
     use stdarch_test::simd_test;
 
+    impl<const N: usize> ShuffleMask<N> {
+        fn as_array(&self) -> &[u32; N] {
+            unsafe { std::mem::transmute(self) }
+        }
+    }
+
     #[test]
     fn reverse_mask() {
-        assert_eq!(ShuffleMask::<4>::reverse().0, [3, 2, 1, 0]);
+        assert_eq!(ShuffleMask::<4>::reverse().as_array(), &[3, 2, 1, 0]);
     }
 
     #[test]
     fn mergel_mask() {
-        assert_eq!(ShuffleMask::<4>::merge_low().0, [2, 6, 3, 7]);
+        assert_eq!(ShuffleMask::<4>::merge_low().as_array(), &[2, 6, 3, 7]);
     }
 
     #[test]
     fn mergeh_mask() {
-        assert_eq!(ShuffleMask::<4>::merge_high().0, [0, 4, 1, 5]);
+        assert_eq!(ShuffleMask::<4>::merge_high().as_array(), &[0, 4, 1, 5]);
     }
 
     #[test]
     fn pack_mask() {
-        assert_eq!(ShuffleMask::<4>::pack().0, [1, 3, 5, 7]);
+        assert_eq!(ShuffleMask::<4>::pack().as_array(), &[1, 3, 5, 7]);
     }
 
     #[test]
diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs
index 3dabcde18ce..1eaa89663b2 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs
@@ -1272,7 +1272,7 @@ pub unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i {
 }
 
 /// Conditionally store 8-bit integer elements from `a` into memory using
-/// `mask`.
+/// `mask`; the store is flagged as non-temporal (the data is unlikely to be used again soon).
 ///
 /// Elements are not stored when the highest bit is not set in the
 /// corresponding element.
@@ -1281,6 +1281,15 @@ pub unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i {
 /// to be aligned on any particular boundary.
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128)
+///
+/// # Safety of non-temporal stores
+///
+/// After using this intrinsic, but before any other access to the memory that this intrinsic
+/// mutates, a call to [`_mm_sfence`] must be performed by the thread that used the intrinsic. In
+/// particular, functions that call this intrinsic should generally call `_mm_sfence` before they
+/// return.
+///
+/// See [`_mm_sfence`] for details.
 #[inline]
 #[target_feature(enable = "sse2")]
 #[cfg_attr(test, assert_instr(maskmovdqu))]
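The new safety section implies a usage pattern in which every masked non-temporal store is followed by a fence before the memory is observed again. A small x86_64 sketch of that pattern follows; it is not part of this diff, and `masked_store` is an invented helper name.

```rust
#[cfg(target_arch = "x86_64")]
fn masked_store(dst: &mut [i8; 16], data: [i8; 16], mask: [i8; 16]) {
    use core::arch::x86_64::{__m128i, _mm_maskmoveu_si128, _mm_sfence};
    unsafe {
        let a: __m128i = core::mem::transmute(data);
        let m: __m128i = core::mem::transmute(mask);
        // Bytes of `data` whose mask byte has its high bit set are stored,
        // and the store is non-temporal.
        _mm_maskmoveu_si128(a, m, dst.as_mut_ptr());
        // Per the safety section above: fence before the memory is accessed again.
        _mm_sfence();
    }
}

#[cfg(target_arch = "x86_64")]
fn main() {
    let mut buf = [0i8; 16];
    let mut mask = [0i8; 16];
    mask[0] = -1; // only byte 0 has its high bit set
    masked_store(&mut buf, [7i8; 16], mask);
    assert_eq!((buf[0], buf[1]), (7, 0));
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```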
diff --git a/library/stdarch/crates/intrinsic-test/Cargo.toml b/library/stdarch/crates/intrinsic-test/Cargo.toml
index 06051abc8d0..fbbf90e1400 100644
--- a/library/stdarch/crates/intrinsic-test/Cargo.toml
+++ b/library/stdarch/crates/intrinsic-test/Cargo.toml
@@ -11,12 +11,9 @@ license = "MIT OR Apache-2.0"
 edition = "2024"
 
 [dependencies]
-lazy_static = "1.4.0"
 serde = { version = "1", features = ["derive"] }
 serde_json = "1.0"
-csv = "1.1"
 clap = { version = "4.4", features = ["derive"] }
-regex = "1.4.2"
 log = "0.4.11"
 pretty_env_logger = "0.5.0"
 rayon = "1.5.0"
diff --git a/library/stdarch/crates/intrinsic-test/src/arm/argument.rs b/library/stdarch/crates/intrinsic-test/src/arm/argument.rs
new file mode 100644
index 00000000000..c43609bb2db
--- /dev/null
+++ b/library/stdarch/crates/intrinsic-test/src/arm/argument.rs
@@ -0,0 +1,15 @@
+use crate::arm::intrinsic::ArmIntrinsicType;
+use crate::common::argument::Argument;
+
+// This helper lives here (rather than in the common code)
+// because of how intrinsic arguments are encoded in the
+// JSON source of the ARM intrinsics.
+impl Argument<ArmIntrinsicType> {
+    pub fn type_and_name_from_c(arg: &str) -> (&str, &str) {
+        let split_index = arg
+            .rfind([' ', '*'])
+            .expect("Couldn't split type and argname");
+
+        (arg[..split_index + 1].trim_end(), &arg[split_index + 1..])
+    }
+}
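For illustration, here is a standalone copy of the splitting rule above (an `rfind` on `' '` or `'*'`) together with the kind of inputs it is meant to handle; the example argument strings are invented, not taken from the ARM JSON.

```rust
// Standalone copy of the split rule above, for illustration only.
fn type_and_name_from_c(arg: &str) -> (&str, &str) {
    let split_index = arg
        .rfind([' ', '*'])
        .expect("Couldn't split type and argname");
    (arg[..split_index + 1].trim_end(), &arg[split_index + 1..])
}

fn main() {
    // Plain argument: split at the last space.
    assert_eq!(type_and_name_from_c("int8x8_t a"), ("int8x8_t", "a"));
    // Pointer argument: the '*' stays with the type, the name follows it.
    assert_eq!(type_and_name_from_c("int8_t * ptr"), ("int8_t *", "ptr"));
}
```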
diff --git a/library/stdarch/crates/intrinsic-test/src/arm/compile.rs b/library/stdarch/crates/intrinsic-test/src/arm/compile.rs
index 48a8ed950e3..7da35f9a111 100644
--- a/library/stdarch/crates/intrinsic-test/src/arm/compile.rs
+++ b/library/stdarch/crates/intrinsic-test/src/arm/compile.rs
@@ -6,16 +6,16 @@ pub fn build_cpp_compilation(config: &ProcessedCli) -> Option<CppCompilation> {
 
     // -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations
     let mut command = CompilationCommandBuilder::new()
-        .add_arch_flags(vec!["armv8.6-a", "crypto", "crc", "dotprod", "fp16"])
+        .add_arch_flags(["armv8.6-a", "crypto", "crc", "dotprod", "fp16"])
         .set_compiler(cpp_compiler)
         .set_target(&config.target)
         .set_opt_level("2")
         .set_cxx_toolchain_dir(config.cxx_toolchain_dir.as_deref())
         .set_project_root("c_programs")
-        .add_extra_flags(vec!["-ffp-contract=off", "-Wno-narrowing"]);
+        .add_extra_flags(["-ffp-contract=off", "-Wno-narrowing"]);
 
     if !config.target.contains("v7") {
-        command = command.add_arch_flags(vec!["faminmax", "lut", "sha3"]);
+        command = command.add_arch_flags(["faminmax", "lut", "sha3"]);
     }
 
     if !cpp_compiler.contains("clang") {
diff --git a/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs
index 16572b2c03f..fd93eff76e0 100644
--- a/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs
+++ b/library/stdarch/crates/intrinsic-test/src/arm/intrinsic.rs
@@ -5,19 +5,22 @@ use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, S
 use std::ops::{Deref, DerefMut};
 
 #[derive(Debug, Clone, PartialEq)]
-pub struct ArmIntrinsicType(pub IntrinsicType);
+pub struct ArmIntrinsicType {
+    pub data: IntrinsicType,
+    pub target: String,
+}
 
 impl Deref for ArmIntrinsicType {
     type Target = IntrinsicType;
 
     fn deref(&self) -> &Self::Target {
-        &self.0
+        &self.data
     }
 }
 
 impl DerefMut for ArmIntrinsicType {
     fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
+        &mut self.data
     }
 }
 
diff --git a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs
index 58d366c86a9..b019abab213 100644
--- a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs
+++ b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs
@@ -2,7 +2,7 @@ use super::intrinsic::ArmIntrinsicType;
 use crate::common::argument::{Argument, ArgumentList};
 use crate::common::constraint::Constraint;
 use crate::common::intrinsic::Intrinsic;
-use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition};
+use crate::common::intrinsic_helpers::IntrinsicType;
 use serde::Deserialize;
 use serde_json::Value;
 use std::collections::HashMap;
@@ -86,18 +86,21 @@ fn json_to_intrinsic(
         .into_iter()
         .enumerate()
         .map(|(i, arg)| {
-            let arg_name = Argument::<ArmIntrinsicType>::type_and_name_from_c(&arg).1;
+            let (type_name, arg_name) = Argument::<ArmIntrinsicType>::type_and_name_from_c(&arg);
             let metadata = intr.args_prep.as_mut();
             let metadata = metadata.and_then(|a| a.remove(arg_name));
             let arg_prep: Option<ArgPrep> = metadata.and_then(|a| a.try_into().ok());
             let constraint: Option<Constraint> = arg_prep.and_then(|a| a.try_into().ok());
+            let ty = ArmIntrinsicType::from_c(type_name, target)
+                .unwrap_or_else(|_| panic!("Failed to parse argument '{arg}'"));
 
-            let mut arg = Argument::<ArmIntrinsicType>::from_c(i, &arg, target, constraint);
+            let mut arg =
+                Argument::<ArmIntrinsicType>::new(i, String::from(arg_name), ty, constraint);
 
             // The JSON doesn't list immediates as const
             let IntrinsicType {
                 ref mut constant, ..
-            } = arg.ty.0;
+            } = arg.ty.data;
             if arg.name.starts_with("imm") {
                 *constant = true
             }
diff --git a/library/stdarch/crates/intrinsic-test/src/arm/mod.rs b/library/stdarch/crates/intrinsic-test/src/arm/mod.rs
index 0a64a24e731..51f5ac42837 100644
--- a/library/stdarch/crates/intrinsic-test/src/arm/mod.rs
+++ b/library/stdarch/crates/intrinsic-test/src/arm/mod.rs
@@ -1,23 +1,24 @@
+mod argument;
 mod compile;
 mod config;
 mod intrinsic;
 mod json_parser;
 mod types;
 
-use std::fs::File;
+use std::fs::{self, File};
 
 use rayon::prelude::*;
 
-use crate::arm::config::POLY128_OSTREAM_DEF;
-use crate::common::SupportedArchitectureTest;
 use crate::common::cli::ProcessedCli;
 use crate::common::compare::compare_outputs;
 use crate::common::gen_c::{write_main_cpp, write_mod_cpp};
-use crate::common::gen_rust::compile_rust_programs;
-use crate::common::intrinsic::{Intrinsic, IntrinsicDefinition};
+use crate::common::gen_rust::{
+    compile_rust_programs, write_bin_cargo_toml, write_lib_cargo_toml, write_lib_rs, write_main_rs,
+};
+use crate::common::intrinsic::Intrinsic;
 use crate::common::intrinsic_helpers::TypeKind;
-use crate::common::write_file::write_rust_testfiles;
-use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, build_notices};
+use crate::common::{SupportedArchitectureTest, chunk_info};
+use config::{AARCH_CONFIGURATIONS, F16_FORMATTING_DEF, POLY128_OSTREAM_DEF, build_notices};
 use intrinsic::ArmIntrinsicType;
 use json_parser::get_neon_intrinsics;
 
@@ -26,13 +27,6 @@ pub struct ArmArchitectureTest {
     cli_options: ProcessedCli,
 }
 
-fn chunk_info(intrinsic_count: usize) -> (usize, usize) {
-    let available_parallelism = std::thread::available_parallelism().unwrap().get();
-    let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count));
-
-    (chunk_size, intrinsic_count.div_ceil(chunk_size))
-}
-
 impl SupportedArchitectureTest for ArmArchitectureTest {
     fn create(cli_options: ProcessedCli) -> Box<Self> {
         let a32 = cli_options.target.contains("v7");
@@ -68,9 +62,10 @@ impl SupportedArchitectureTest for ArmArchitectureTest {
 
         let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len());
 
-        let cpp_compiler = compile::build_cpp_compilation(&self.cli_options).unwrap();
+        let cpp_compiler_wrapped = compile::build_cpp_compilation(&self.cli_options);
 
         let notice = &build_notices("// ");
+        fs::create_dir_all("c_programs").unwrap();
         self.intrinsics
             .par_chunks(chunk_size)
             .enumerate()
@@ -79,10 +74,15 @@ impl SupportedArchitectureTest for ArmArchitectureTest {
                 let mut file = File::create(&c_filename).unwrap();
                 write_mod_cpp(&mut file, notice, c_target, platform_headers, chunk).unwrap();
 
-                // compile this cpp file into a .o file
-                let output = cpp_compiler
-                    .compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?;
-                assert!(output.status.success(), "{output:?}");
+                // Compile this cpp file into a .o file.
+                //
+                // `cpp_compiler_wrapped` is `None` when the `--generate-only`
+                // flag is passed; in that case compilation is skipped.
+                if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() {
+                    let output = cpp_compiler
+                        .compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o"))?;
+                    assert!(output.status.success(), "{output:?}");
+                }
 
                 Ok(())
             })
@@ -98,46 +98,84 @@ impl SupportedArchitectureTest for ArmArchitectureTest {
         )
         .unwrap();
 
-        // compile this cpp file into a .o file
-        info!("compiling main.cpp");
-        let output = cpp_compiler
-            .compile_object_file("main.cpp", "intrinsic-test-programs.o")
-            .unwrap();
-        assert!(output.status.success(), "{output:?}");
-
-        let object_files = (0..chunk_count)
-            .map(|i| format!("mod_{i}.o"))
-            .chain(["intrinsic-test-programs.o".to_owned()]);
-
-        let output = cpp_compiler
-            .link_executable(object_files, "intrinsic-test-programs")
-            .unwrap();
-        assert!(output.status.success(), "{output:?}");
+        // `cpp_compiler_wrapped` is `None` when the `--generate-only` flag is
+        // passed; skip compiling and linking the C programs in that case.
+        if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() {
+            // compile this cpp file into a .o file
+            info!("compiling main.cpp");
+            let output = cpp_compiler
+                .compile_object_file("main.cpp", "intrinsic-test-programs.o")
+                .unwrap();
+            assert!(output.status.success(), "{output:?}");
+
+            let object_files = (0..chunk_count)
+                .map(|i| format!("mod_{i}.o"))
+                .chain(["intrinsic-test-programs.o".to_owned()]);
+
+            let output = cpp_compiler
+                .link_executable(object_files, "intrinsic-test-programs")
+                .unwrap();
+            assert!(output.status.success(), "{output:?}");
+        }
 
         true
     }
 
     fn build_rust_file(&self) -> bool {
-        let rust_target = if self.cli_options.target.contains("v7") {
+        std::fs::create_dir_all("rust_programs/src").unwrap();
+
+        let architecture = if self.cli_options.target.contains("v7") {
             "arm"
         } else {
             "aarch64"
         };
+
+        let (chunk_size, chunk_count) = chunk_info(self.intrinsics.len());
+
+        let mut cargo = File::create("rust_programs/Cargo.toml").unwrap();
+        write_bin_cargo_toml(&mut cargo, chunk_count).unwrap();
+
+        let mut main_rs = File::create("rust_programs/src/main.rs").unwrap();
+        write_main_rs(
+            &mut main_rs,
+            chunk_count,
+            AARCH_CONFIGURATIONS,
+            "",
+            self.intrinsics.iter().map(|i| i.name.as_str()),
+        )
+        .unwrap();
+
         let target = &self.cli_options.target;
         let toolchain = self.cli_options.toolchain.as_deref();
         let linker = self.cli_options.linker.as_deref();
-        let intrinsics_name_list = write_rust_testfiles(
-            self.intrinsics
-                .iter()
-                .map(|i| i as &dyn IntrinsicDefinition<_>)
-                .collect::<Vec<_>>(),
-            rust_target,
-            &build_notices("// "),
-            F16_FORMATTING_DEF,
-            AARCH_CONFIGURATIONS,
-        );
 
-        compile_rust_programs(intrinsics_name_list, toolchain, target, linker)
+        let notice = &build_notices("// ");
+        self.intrinsics
+            .par_chunks(chunk_size)
+            .enumerate()
+            .map(|(i, chunk)| {
+                std::fs::create_dir_all(format!("rust_programs/mod_{i}/src"))?;
+
+                let rust_filename = format!("rust_programs/mod_{i}/src/lib.rs");
+                trace!("generating `{rust_filename}`");
+                let mut file = File::create(rust_filename)?;
+
+                let cfg = AARCH_CONFIGURATIONS;
+                let definitions = F16_FORMATTING_DEF;
+                write_lib_rs(&mut file, architecture, notice, cfg, definitions, chunk)?;
+
+                let toml_filename = format!("rust_programs/mod_{i}/Cargo.toml");
+                trace!("generating `{toml_filename}`");
+                let mut file = File::create(toml_filename).unwrap();
+
+                write_lib_cargo_toml(&mut file, &format!("mod_{i}"))?;
+
+                Ok(())
+            })
+            .collect::<Result<(), std::io::Error>>()
+            .unwrap();
+
+        compile_rust_programs(toolchain, target, linker)
     }
 
     fn compare_outputs(&self) -> bool {
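Taken together with `write_bin_cargo_toml` and `write_main_rs` (see the `gen_rust.rs` hunks later in this diff), `build_rust_file` now emits one binary crate that depends on per-chunk `mod_{i}` library crates and dispatches on the intrinsic name passed as the first argument. Below is a hedged sketch of roughly what the generated `rust_programs/src/main.rs` looks like for two chunks and two invented intrinsic names (`vadd_s8`, `vsub_s8`); the real file is produced by `write_main_rs`.

```rust
// Hypothetical generator output, shown only to illustrate the new layout.
#![feature(simd_ffi)]
#![feature(f16)]
#![allow(unused)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]

use mod_0::*; // per-chunk library crates declared in the generated Cargo.toml
use mod_1::*;

fn main() {
    // The binary runs exactly one intrinsic's test, selected by name.
    match std::env::args().nth(1).unwrap().as_str() {
        "vadd_s8" => run_vadd_s8(),
        "vsub_s8" => run_vsub_s8(),
        other => panic!("unknown intrinsic `{}`", other),
    }
}
```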
diff --git a/library/stdarch/crates/intrinsic-test/src/arm/types.rs b/library/stdarch/crates/intrinsic-test/src/arm/types.rs
index 77f5e8d0e56..32f8f106ce2 100644
--- a/library/stdarch/crates/intrinsic-test/src/arm/types.rs
+++ b/library/stdarch/crates/intrinsic-test/src/arm/types.rs
@@ -5,12 +5,10 @@ use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, S
 impl IntrinsicTypeDefinition for ArmIntrinsicType {
     /// Gets a string containing the typename for this type in C format.
     fn c_type(&self) -> String {
-        let prefix = self.0.kind.c_prefix();
-        let const_prefix = if self.0.constant { "const " } else { "" };
+        let prefix = self.kind.c_prefix();
+        let const_prefix = if self.constant { "const " } else { "" };
 
-        if let (Some(bit_len), simd_len, vec_len) =
-            (self.0.bit_len, self.0.simd_len, self.0.vec_len)
-        {
+        if let (Some(bit_len), simd_len, vec_len) = (self.bit_len, self.simd_len, self.vec_len) {
             match (simd_len, vec_len) {
                 (None, None) => format!("{const_prefix}{prefix}{bit_len}_t"),
                 (Some(simd), None) => format!("{prefix}{bit_len}x{simd}_t"),
@@ -23,35 +21,16 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
     }
 
     fn c_single_vector_type(&self) -> String {
-        if let (Some(bit_len), Some(simd_len)) = (self.0.bit_len, self.0.simd_len) {
+        if let (Some(bit_len), Some(simd_len)) = (self.bit_len, self.simd_len) {
             format!(
                 "{prefix}{bit_len}x{simd_len}_t",
-                prefix = self.0.kind.c_prefix()
+                prefix = self.kind.c_prefix()
             )
         } else {
             unreachable!("Shouldn't be called on this type")
         }
     }
 
-    fn rust_type(&self) -> String {
-        let rust_prefix = self.0.kind.rust_prefix();
-        let c_prefix = self.0.kind.c_prefix();
-        if self.0.ptr_constant {
-            self.c_type()
-        } else if let (Some(bit_len), simd_len, vec_len) =
-            (self.0.bit_len, self.0.simd_len, self.0.vec_len)
-        {
-            match (simd_len, vec_len) {
-                (None, None) => format!("{rust_prefix}{bit_len}"),
-                (Some(simd), None) => format!("{c_prefix}{bit_len}x{simd}_t"),
-                (Some(simd), Some(vec)) => format!("{c_prefix}{bit_len}x{simd}x{vec}_t"),
-                (None, Some(_)) => todo!("{:#?}", self), // Likely an invalid case
-            }
-        } else {
-            todo!("{:#?}", self)
-        }
-    }
-
     /// Determines the load function for this type.
     fn get_load_function(&self, language: Language) -> String {
         if let IntrinsicType {
@@ -59,9 +38,8 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
             bit_len: Some(bl),
             simd_len,
             vec_len,
-            target,
             ..
-        } = &self.0
+        } = &self.data
         {
             let quad = if simd_len.unwrap_or(1) * bl > 64 {
                 "q"
@@ -69,7 +47,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
                 ""
             };
 
-            let choose_workaround = language == Language::C && target.contains("v7");
+            let choose_workaround = language == Language::C && self.target.contains("v7");
             format!(
                 "vld{len}{quad}_{type}{size}",
                 type = match k {
@@ -97,7 +75,7 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
             bit_len: Some(bl),
             simd_len,
             ..
-        } = &self.0
+        } = &self.data
         {
             let quad = if (simd_len.unwrap_or(1) * bl) > 64 {
                 "q"
@@ -120,8 +98,10 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
             todo!("get_lane_function IntrinsicType: {:#?}", self)
         }
     }
+}
 
-    fn from_c(s: &str, target: &str) -> Result<Self, String> {
+impl ArmIntrinsicType {
+    pub fn from_c(s: &str, target: &str) -> Result<Self, String> {
         const CONST_STR: &str = "const";
         if let Some(s) = s.strip_suffix('*') {
             let (s, constant) = match s.trim().strip_suffix(CONST_STR) {
@@ -162,32 +142,36 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType {
                     ),
                     None => None,
                 };
-                Ok(ArmIntrinsicType(IntrinsicType {
-                    ptr: false,
-                    ptr_constant: false,
-                    constant,
-                    kind: arg_kind,
-                    bit_len: Some(bit_len),
-                    simd_len,
-                    vec_len,
+                Ok(ArmIntrinsicType {
+                    data: IntrinsicType {
+                        ptr: false,
+                        ptr_constant: false,
+                        constant,
+                        kind: arg_kind,
+                        bit_len: Some(bit_len),
+                        simd_len,
+                        vec_len,
+                    },
                     target: target.to_string(),
-                }))
+                })
             } else {
                 let kind = start.parse::<TypeKind>()?;
                 let bit_len = match kind {
                     TypeKind::Int(_) => Some(32),
                     _ => None,
                 };
-                Ok(ArmIntrinsicType(IntrinsicType {
-                    ptr: false,
-                    ptr_constant: false,
-                    constant,
-                    kind: start.parse::<TypeKind>()?,
-                    bit_len,
-                    simd_len: None,
-                    vec_len: None,
+                Ok(ArmIntrinsicType {
+                    data: IntrinsicType {
+                        ptr: false,
+                        ptr_constant: false,
+                        constant,
+                        kind: start.parse::<TypeKind>()?,
+                        bit_len,
+                        simd_len: None,
+                        vec_len: None,
+                    },
                     target: target.to_string(),
-                }))
+                })
             }
         }
     }
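The naming scheme that `c_type` (and, in reverse, `from_c`) handles follows the usual ARM C type spelling: scalar, single-vector, and vector-array forms. The sketch below shows just the formatting half, with the `const` handling omitted; the example inputs are invented.

```rust
// Illustration of the name scheme handled by `c_type` above (simplified).
fn c_name(prefix: &str, bit_len: u32, simd_len: Option<u32>, vec_len: Option<u32>) -> String {
    match (simd_len, vec_len) {
        (None, None) => format!("{prefix}{bit_len}_t"),               // e.g. "int32_t"
        (Some(s), None) => format!("{prefix}{bit_len}x{s}_t"),        // e.g. "int8x8_t"
        (Some(s), Some(v)) => format!("{prefix}{bit_len}x{s}x{v}_t"), // e.g. "int8x8x2_t"
        (None, Some(_)) => unreachable!(),                            // invalid combination
    }
}

fn main() {
    assert_eq!(c_name("int", 8, Some(8), None), "int8x8_t");
    assert_eq!(c_name("uint", 32, Some(4), Some(2)), "uint32x4x2_t");
}
```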
diff --git a/library/stdarch/crates/intrinsic-test/src/common/argument.rs b/library/stdarch/crates/intrinsic-test/src/common/argument.rs
index 1df4f55995e..f38515e40a9 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/argument.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/argument.rs
@@ -20,6 +20,15 @@ impl<T> Argument<T>
 where
     T: IntrinsicTypeDefinition,
 {
+    pub fn new(pos: usize, name: String, ty: T, constraint: Option<Constraint>) -> Self {
+        Argument {
+            pos,
+            name,
+            ty,
+            constraint,
+        }
+    }
+
     pub fn to_c_type(&self) -> String {
         self.ty.c_type()
     }
@@ -36,14 +45,6 @@ where
         self.constraint.is_some()
     }
 
-    pub fn type_and_name_from_c(arg: &str) -> (&str, &str) {
-        let split_index = arg
-            .rfind([' ', '*'])
-            .expect("Couldn't split type and argname");
-
-        (arg[..split_index + 1].trim_end(), &arg[split_index + 1..])
-    }
-
     /// The binding keyword (e.g. "const" or "let") for the array of possible test inputs.
     fn rust_vals_array_binding(&self) -> impl std::fmt::Display {
         if self.ty.is_rust_vals_array_const() {
@@ -62,25 +63,6 @@ where
         }
     }
 
-    pub fn from_c(
-        pos: usize,
-        arg: &str,
-        target: &str,
-        constraint: Option<Constraint>,
-    ) -> Argument<T> {
-        let (ty, var_name) = Self::type_and_name_from_c(arg);
-
-        let ty =
-            T::from_c(ty, target).unwrap_or_else(|_| panic!("Failed to parse argument '{arg}'"));
-
-        Argument {
-            pos,
-            name: String::from(var_name),
-            ty: ty,
-            constraint,
-        }
-    }
-
     fn as_call_param_c(&self) -> String {
         self.ty.as_call_param_c(&self.name)
     }
@@ -114,14 +96,6 @@ where
             .join(", ")
     }
 
-    pub fn as_constraint_parameters_rust(&self) -> String {
-        self.iter()
-            .filter(|a| a.has_constraint())
-            .map(|arg| arg.name.clone())
-            .collect::<Vec<String>>()
-            .join(", ")
-    }
-
     /// Creates a line for each argument that initializes an array for C from which `loads` argument
     /// values can be loaded  as a sliding window.
     /// e.g `const int32x2_t a_vals = {0x3effffff, 0x3effffff, 0x3f7fffff}`, if loads=2.
@@ -146,21 +120,25 @@ where
 
     /// Creates a line for each argument that initializes an array for Rust from which `loads` argument
     /// values can be loaded as a sliding window, e.g `const A_VALS: [u32; 20]  = [...];`
-    pub fn gen_arglists_rust(&self, indentation: Indentation, loads: u32) -> String {
-        self.iter()
-            .filter(|&arg| !arg.has_constraint())
-            .map(|arg| {
-                format!(
-                    "{indentation}{bind} {name}: [{ty}; {load_size}] = {values};",
-                    bind = arg.rust_vals_array_binding(),
-                    name = arg.rust_vals_array_name(),
-                    ty = arg.ty.rust_scalar_type(),
-                    load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1,
-                    values = arg.ty.populate_random(indentation, loads, &Language::Rust)
-                )
-            })
-            .collect::<Vec<_>>()
-            .join("\n")
+    pub fn gen_arglists_rust(
+        &self,
+        w: &mut impl std::io::Write,
+        indentation: Indentation,
+        loads: u32,
+    ) -> std::io::Result<()> {
+        for arg in self.iter().filter(|&arg| !arg.has_constraint()) {
+            writeln!(
+                w,
+                "{indentation}{bind} {name}: [{ty}; {load_size}] = {values};",
+                bind = arg.rust_vals_array_binding(),
+                name = arg.rust_vals_array_name(),
+                ty = arg.ty.rust_scalar_type(),
+                load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1,
+                values = arg.ty.populate_random(indentation, loads, &Language::Rust)
+            )?
+        }
+
+        Ok(())
     }
 
     /// Creates a line for each argument that initializes the argument from an array `[arg]_vals` at
diff --git a/library/stdarch/crates/intrinsic-test/src/common/compare.rs b/library/stdarch/crates/intrinsic-test/src/common/compare.rs
index cb55922eb19..1ad00839ef0 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/compare.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/compare.rs
@@ -2,25 +2,29 @@ use super::cli::FailureReason;
 use rayon::prelude::*;
 use std::process::Command;
 
-pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target: &str) -> bool {
-    fn runner_command(runner: &str) -> Command {
-        let mut it = runner.split_whitespace();
-        let mut cmd = Command::new(it.next().unwrap());
-        cmd.args(it);
+fn runner_command(runner: &str) -> Command {
+    let mut it = runner.split_whitespace();
+    let mut cmd = Command::new(it.next().unwrap());
+    cmd.args(it);
 
-        cmd
-    }
+    cmd
+}
 
+pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target: &str) -> bool {
     let intrinsics = intrinsic_name_list
         .par_iter()
         .filter_map(|intrinsic_name| {
+
             let c = runner_command(runner)
-                .arg("./c_programs/intrinsic-test-programs")
+                .arg("intrinsic-test-programs")
                 .arg(intrinsic_name)
+                .current_dir("c_programs")
                 .output();
 
             let rust = runner_command(runner)
-                .arg(format!("target/{target}/release/{intrinsic_name}"))
+                .arg(format!("target/{target}/release/intrinsic-test-programs"))
+                .arg(intrinsic_name)
+                .current_dir("rust_programs")
                 .output();
 
             let (c, rust) = match (c, rust) {
@@ -30,7 +34,7 @@ pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target:
 
             if !c.status.success() {
                 error!(
-                    "Failed to run C program for intrinsic {intrinsic_name}\nstdout: {stdout}\nstderr: {stderr}",
+                    "Failed to run C program for intrinsic `{intrinsic_name}`\nstdout: {stdout}\nstderr: {stderr}",
                     stdout = std::str::from_utf8(&c.stdout).unwrap_or(""),
                     stderr = std::str::from_utf8(&c.stderr).unwrap_or(""),
                 );
@@ -39,9 +43,9 @@ pub fn compare_outputs(intrinsic_name_list: &Vec<String>, runner: &str, target:
 
             if !rust.status.success() {
                 error!(
-                    "Failed to run Rust program for intrinsic {intrinsic_name}\nstdout: {stdout}\nstderr: {stderr}",
-                    stdout = String::from_utf8_lossy(&rust.stdout),
-                    stderr = String::from_utf8_lossy(&rust.stderr),
+                    "Failed to run Rust program for intrinsic `{intrinsic_name}`\nstdout: {stdout}\nstderr: {stderr}",
+                    stdout = std::str::from_utf8(&rust.stdout).unwrap_or(""),
+                    stderr = std::str::from_utf8(&rust.stderr).unwrap_or(""),
                 );
                 return Some(FailureReason::RunRust(intrinsic_name.clone()));
             }
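The hoisted `runner_command` helper simply splits the `--runner` string on whitespace into a program and its arguments, and both the C and Rust binaries are now invoked with the intrinsic name as their single argument from their respective working directories. A standalone sketch of the splitting behaviour follows; the qemu runner string is only an example value.

```rust
use std::process::Command;

// Standalone copy of `runner_command` above, shown with a typical emulator runner.
fn runner_command(runner: &str) -> Command {
    let mut it = runner.split_whitespace();
    let mut cmd = Command::new(it.next().unwrap());
    cmd.args(it);
    cmd
}

fn main() {
    // e.g. `--runner "qemu-aarch64 -L /usr/aarch64-linux-gnu"` (illustrative value)
    let cmd = runner_command("qemu-aarch64 -L /usr/aarch64-linux-gnu");
    assert_eq!(cmd.get_program(), "qemu-aarch64");
    let args: Vec<_> = cmd.get_args().collect();
    assert_eq!(args, ["-L", "/usr/aarch64-linux-gnu"]);
}
```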
diff --git a/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs b/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs
index 0c905a149e4..258e4181658 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/compile_c.rs
@@ -37,9 +37,9 @@ impl CompilationCommandBuilder {
         self
     }
 
-    pub fn add_arch_flags(mut self, flags: Vec<&str>) -> Self {
-        let mut new_arch_flags = flags.into_iter().map(|v| v.to_string()).collect();
-        self.arch_flags.append(&mut new_arch_flags);
+    pub fn add_arch_flags<'a>(mut self, flags: impl IntoIterator<Item = &'a str>) -> Self {
+        self.arch_flags
+            .extend(flags.into_iter().map(|s| s.to_owned()));
 
         self
     }
@@ -55,14 +55,15 @@ impl CompilationCommandBuilder {
         self
     }
 
-    pub fn add_extra_flags(mut self, flags: Vec<&str>) -> Self {
-        let mut flags: Vec<String> = flags.into_iter().map(|f| f.to_string()).collect();
-        self.extra_flags.append(&mut flags);
+    pub fn add_extra_flags<'a>(mut self, flags: impl IntoIterator<Item = &'a str>) -> Self {
+        self.extra_flags
+            .extend(flags.into_iter().map(|s| s.to_owned()));
+
         self
     }
 
     pub fn add_extra_flag(self, flag: &str) -> Self {
-        self.add_extra_flags(vec![flag])
+        self.add_extra_flags([flag])
     }
 }
 
diff --git a/library/stdarch/crates/intrinsic-test/src/common/constraint.rs b/library/stdarch/crates/intrinsic-test/src/common/constraint.rs
index 269fb7f90cb..5984e0fcc22 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/constraint.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/constraint.rs
@@ -1,17 +1,24 @@
 use serde::Deserialize;
 use std::ops::Range;
 
+/// Describes the values to test for a const generic parameter.
 #[derive(Debug, PartialEq, Clone, Deserialize)]
 pub enum Constraint {
+    /// Test a single value.
     Equal(i64),
+    /// Test a range of values, e.g. `0..16`.
     Range(Range<i64>),
+    /// Test discrete values, e.g. `vec![1, 2, 4, 8]`.
+    Set(Vec<i64>),
 }
 
 impl Constraint {
-    pub fn to_range(&self) -> Range<i64> {
+    /// Iterate over the values of this constraint.
+    pub fn iter<'a>(&'a self) -> impl Iterator<Item = i64> + 'a {
         match self {
-            Constraint::Equal(eq) => *eq..*eq + 1,
-            Constraint::Range(range) => range.clone(),
+            Constraint::Equal(i) => std::slice::Iter::default().copied().chain(*i..*i + 1),
+            Constraint::Range(range) => std::slice::Iter::default().copied().chain(range.clone()),
+            Constraint::Set(items) => items.iter().copied().chain(std::ops::Range::default()),
         }
     }
 }
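The `iter` implementation above uses a small trick to give all three match arms the same concrete iterator type: every arm is a `Chain` of a (possibly empty) slice iterator and a (possibly empty) range. A self-contained sketch of the same trick:

```rust
use std::ops::Range;

enum Constraint {
    Equal(i64),
    Range(Range<i64>),
    Set(Vec<i64>),
}

impl Constraint {
    // Every arm yields `Chain<Copied<slice::Iter<i64>>, Range<i64>>`, so the
    // `impl Trait` return type works without boxing.
    fn iter(&self) -> impl Iterator<Item = i64> + '_ {
        match self {
            Constraint::Equal(i) => std::slice::Iter::default().copied().chain(*i..*i + 1),
            Constraint::Range(range) => std::slice::Iter::default().copied().chain(range.clone()),
            Constraint::Set(items) => items.iter().copied().chain(Range::default()),
        }
    }
}

fn main() {
    assert_eq!(Constraint::Equal(3).iter().collect::<Vec<_>>(), [3]);
    assert_eq!(Constraint::Range(0..4).iter().collect::<Vec<_>>(), [0, 1, 2, 3]);
    assert_eq!(Constraint::Set(vec![1, 2, 4, 8]).iter().collect::<Vec<_>>(), [1, 2, 4, 8]);
}
```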
diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs
index 905efb6d890..84755ce5250 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs
@@ -40,7 +40,7 @@ pub fn generate_c_constraint_blocks<'a, T: IntrinsicTypeDefinition + 'a>(
     };
 
     let body_indentation = indentation.nested();
-    for i in current.constraint.iter().flat_map(|c| c.to_range()) {
+    for i in current.constraint.iter().flat_map(|c| c.iter()) {
         let ty = current.ty.c_type();
 
         writeln!(w, "{indentation}{{")?;
diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs
index 0e4a95ab528..2a02b8fdff1 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs
@@ -1,10 +1,6 @@
 use itertools::Itertools;
-use rayon::prelude::*;
-use std::collections::BTreeMap;
-use std::fs::File;
 use std::process::Command;
 
-use super::argument::Argument;
 use super::indentation::Indentation;
 use super::intrinsic::{IntrinsicDefinition, format_f16_return_value};
 use super::intrinsic_helpers::IntrinsicTypeDefinition;
@@ -12,86 +8,144 @@ use super::intrinsic_helpers::IntrinsicTypeDefinition;
 // The number of times each intrinsic will be called.
 const PASSES: u32 = 20;
 
-pub fn format_rust_main_template(
-    notices: &str,
-    definitions: &str,
-    configurations: &str,
-    arch_definition: &str,
-    arglists: &str,
-    passes: &str,
-) -> String {
-    format!(
-        r#"{notices}#![feature(simd_ffi)]
-#![feature(f16)]
-#![allow(unused)]
-{configurations}
-{definitions}
-
-use core_arch::arch::{arch_definition}::*;
-
-fn main() {{
-{arglists}
-{passes}
-}}
-"#,
-    )
-}
-
-fn write_cargo_toml(w: &mut impl std::io::Write, binaries: &[String]) -> std::io::Result<()> {
+fn write_cargo_toml_header(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> {
     writeln!(
         w,
         concat!(
             "[package]\n",
-            "name = \"intrinsic-test-programs\"\n",
+            "name = \"{name}\"\n",
             "version = \"{version}\"\n",
             "authors = [{authors}]\n",
             "license = \"{license}\"\n",
             "edition = \"2018\"\n",
-            "[workspace]\n",
-            "[dependencies]\n",
-            "core_arch = {{ path = \"../crates/core_arch\" }}",
         ),
+        name = name,
         version = env!("CARGO_PKG_VERSION"),
         authors = env!("CARGO_PKG_AUTHORS")
             .split(":")
             .format_with(", ", |author, fmt| fmt(&format_args!("\"{author}\""))),
         license = env!("CARGO_PKG_LICENSE"),
-    )?;
+    )
+}
+
+pub fn write_bin_cargo_toml(
+    w: &mut impl std::io::Write,
+    module_count: usize,
+) -> std::io::Result<()> {
+    write_cargo_toml_header(w, "intrinsic-test-programs")?;
+
+    writeln!(w, "[dependencies]")?;
+
+    for i in 0..module_count {
+        writeln!(w, "mod_{i} = {{ path = \"mod_{i}/\" }}")?;
+    }
+
+    Ok(())
+}
+
+pub fn write_lib_cargo_toml(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> {
+    write_cargo_toml_header(w, name)?;
+
+    writeln!(w, "[dependencies]")?;
+    writeln!(w, "core_arch = {{ path = \"../../crates/core_arch\" }}")?;
+
+    Ok(())
+}
+
+pub fn write_main_rs<'a>(
+    w: &mut impl std::io::Write,
+    chunk_count: usize,
+    cfg: &str,
+    definitions: &str,
+    intrinsics: impl Iterator<Item = &'a str> + Clone,
+) -> std::io::Result<()> {
+    writeln!(w, "#![feature(simd_ffi)]")?;
+    writeln!(w, "#![feature(f16)]")?;
+    writeln!(w, "#![allow(unused)]")?;
+
+    // Cargo will spam the logs if these warnings are not silenced.
+    writeln!(w, "#![allow(non_upper_case_globals)]")?;
+    writeln!(w, "#![allow(non_camel_case_types)]")?;
+    writeln!(w, "#![allow(non_snake_case)]")?;
 
-    for binary in binaries {
-        writeln!(
-            w,
-            concat!(
-                "[[bin]]\n",
-                "name = \"{binary}\"\n",
-                "path = \"{binary}/main.rs\"\n",
-            ),
-            binary = binary,
-        )?;
+    writeln!(w, "{cfg}")?;
+    writeln!(w, "{definitions}")?;
+
+    for module in 0..chunk_count {
+        writeln!(w, "use mod_{module}::*;")?;
+    }
+
+    writeln!(w, "fn main() {{")?;
+
+    writeln!(w, "    match std::env::args().nth(1).unwrap().as_str() {{")?;
+
+    for binary in intrinsics {
+        writeln!(w, "        \"{binary}\" => run_{binary}(),")?;
     }
 
+    writeln!(
+        w,
+        "        other => panic!(\"unknown intrinsic `{{}}`\", other),"
+    )?;
+
+    writeln!(w, "    }}")?;
+    writeln!(w, "}}")?;
+
     Ok(())
 }
 
-pub fn compile_rust_programs(
-    binaries: Vec<String>,
-    toolchain: Option<&str>,
-    target: &str,
-    linker: Option<&str>,
-) -> bool {
-    let mut cargo = File::create("rust_programs/Cargo.toml").unwrap();
-    write_cargo_toml(&mut cargo, &binaries).unwrap();
+pub fn write_lib_rs<T: IntrinsicTypeDefinition>(
+    w: &mut impl std::io::Write,
+    architecture: &str,
+    notice: &str,
+    cfg: &str,
+    definitions: &str,
+    intrinsics: &[impl IntrinsicDefinition<T>],
+) -> std::io::Result<()> {
+    write!(w, "{notice}")?;
+
+    writeln!(w, "#![feature(simd_ffi)]")?;
+    writeln!(w, "#![feature(f16)]")?;
+    writeln!(w, "#![allow(unused)]")?;
+
+    // Cargo will spam the logs if these warnings are not silenced.
+    writeln!(w, "#![allow(non_upper_case_globals)]")?;
+    writeln!(w, "#![allow(non_camel_case_types)]")?;
+    writeln!(w, "#![allow(non_snake_case)]")?;
+
+    writeln!(w, "{cfg}")?;
+
+    writeln!(w, "use core_arch::arch::{architecture}::*;")?;
+
+    writeln!(w, "{definitions}")?;
+
+    for intrinsic in intrinsics {
+        crate::common::gen_rust::create_rust_test_module(w, intrinsic)?;
+    }
+
+    Ok(())
+}
 
+pub fn compile_rust_programs(toolchain: Option<&str>, target: &str, linker: Option<&str>) -> bool {
     /* If there has been a linker explicitly set from the command line then
      * we want to set it via setting it in the RUSTFLAGS*/
 
+    // `toolchain` is `None` when the `--generate-only` flag is passed;
+    // in that case there is nothing to build, so report success.
+    if toolchain.is_none() {
+        return true;
+    }
+
+    trace!("Building cargo command");
+
     let mut cargo_command = Command::new("cargo");
     cargo_command.current_dir("rust_programs");
 
-    if let Some(toolchain) = toolchain {
-        if !toolchain.is_empty() {
-            cargo_command.arg(toolchain);
-        }
+    // Do not use the workspace's target directory.
+    cargo_command.env("CARGO_TARGET_DIR", "target");
+
+    if toolchain.is_some_and(|val| !val.is_empty()) {
+        cargo_command.arg(toolchain.unwrap());
     }
     cargo_command.args(["build", "--target", target, "--release"]);
 
@@ -105,7 +159,16 @@ pub fn compile_rust_programs(
     }
 
     cargo_command.env("RUSTFLAGS", rust_flags);
+
+    trace!("running cargo");
+
+    if log::log_enabled!(log::Level::Trace) {
+        cargo_command.stdout(std::process::Stdio::inherit());
+        cargo_command.stderr(std::process::Stdio::inherit());
+    }
+
     let output = cargo_command.output();
+    trace!("cargo is done");
 
     if let Ok(output) = output {
         if output.status.success() {
@@ -124,119 +187,117 @@ pub fn compile_rust_programs(
     }
 }
 
-// Creates directory structure and file path mappings
-pub fn setup_rust_file_paths(identifiers: &Vec<String>) -> BTreeMap<&String, String> {
-    identifiers
-        .par_iter()
-        .map(|identifier| {
-            let rust_dir = format!("rust_programs/{identifier}");
-            let _ = std::fs::create_dir_all(&rust_dir);
-            let rust_filename = format!("{rust_dir}/main.rs");
-
-            (identifier, rust_filename)
-        })
-        .collect::<BTreeMap<&String, String>>()
-}
-
 pub fn generate_rust_test_loop<T: IntrinsicTypeDefinition>(
+    w: &mut impl std::io::Write,
     intrinsic: &dyn IntrinsicDefinition<T>,
     indentation: Indentation,
-    additional: &str,
+    specializations: &[Vec<u8>],
     passes: u32,
-) -> String {
-    let constraints = intrinsic.arguments().as_constraint_parameters_rust();
-    let constraints = if !constraints.is_empty() {
-        format!("::<{constraints}>")
-    } else {
-        constraints
-    };
+) -> std::io::Result<()> {
+    let intrinsic_name = intrinsic.name();
+
+    // Each function (and each specialization) has its own type. Erase that type with a cast.
+    let mut coerce = String::from("unsafe fn(");
+    for _ in intrinsic.arguments().iter().filter(|a| !a.has_constraint()) {
+        coerce += "_, ";
+    }
+    coerce += ") -> _";
+
+    match specializations {
+        [] => {
+            writeln!(w, "    let specializations = [(\"\", {intrinsic_name})];")?;
+        }
+        [const_args] if const_args.is_empty() => {
+            writeln!(w, "    let specializations = [(\"\", {intrinsic_name})];")?;
+        }
+        _ => {
+            writeln!(w, "    let specializations = [")?;
+
+            for specialization in specializations {
+                let mut specialization: Vec<_> =
+                    specialization.iter().map(|d| d.to_string()).collect();
+
+                let const_args = specialization.join(",");
+
+                // The identifier is reversed.
+                specialization.reverse();
+                let id = specialization.join("-");
+
+                writeln!(
+                    w,
+                    "        (\"-{id}\", {intrinsic_name}::<{const_args}> as {coerce}),"
+                )?;
+            }
+
+            writeln!(w, "    ];")?;
+        }
+    }
 
     let return_value = format_f16_return_value(intrinsic);
     let indentation2 = indentation.nested();
     let indentation3 = indentation2.nested();
-    format!(
-        "{indentation}for i in 0..{passes} {{\n\
-            {indentation2}unsafe {{\n\
-                {loaded_args}\
-                {indentation3}let __return_value = {intrinsic_call}{const}({args});\n\
-                {indentation3}println!(\"Result {additional}-{{}}: {{:?}}\", i + 1, {return_value});\n\
-            {indentation2}}}\n\
-        {indentation}}}",
+    writeln!(
+        w,
+        "\
+            for (id, f) in specializations {{\n\
+                for i in 0..{passes} {{\n\
+                    unsafe {{\n\
+                        {loaded_args}\
+                        let __return_value = f({args});\n\
+                        println!(\"Result {{id}}-{{}}: {{:?}}\", i + 1, {return_value});\n\
+                    }}\n\
+                }}\n\
+            }}",
         loaded_args = intrinsic.arguments().load_values_rust(indentation3),
-        intrinsic_call = intrinsic.name(),
-        const = constraints,
         args = intrinsic.arguments().as_call_param_rust(),
     )
 }
 
-pub fn generate_rust_constraint_blocks<T: IntrinsicTypeDefinition>(
-    intrinsic: &dyn IntrinsicDefinition<T>,
-    indentation: Indentation,
-    constraints: &[&Argument<T>],
-    name: String,
-) -> String {
-    if let Some((current, constraints)) = constraints.split_last() {
-        let range = current
-            .constraint
-            .iter()
-            .map(|c| c.to_range())
-            .flat_map(|r| r.into_iter());
-
-        let body_indentation = indentation.nested();
-        range
-            .map(|i| {
-                format!(
-                    "{indentation}{{\n\
-                        {body_indentation}const {name}: {ty} = {val};\n\
-                        {pass}\n\
-                    {indentation}}}",
-                    name = current.name,
-                    ty = current.ty.rust_type(),
-                    val = i,
-                    pass = generate_rust_constraint_blocks(
-                        intrinsic,
-                        body_indentation,
-                        constraints,
-                        format!("{name}-{i}")
-                    )
-                )
+/// Generate the specializations (unique sequences of const-generic arguments) for this intrinsic.
+fn generate_rust_specializations<'a>(
+    constraints: &mut impl Iterator<Item = impl Iterator<Item = i64>>,
+) -> Vec<Vec<u8>> {
+    let mut specializations = vec![vec![]];
+
+    for constraint in constraints {
+        specializations = constraint
+            .flat_map(|right| {
+                specializations.iter().map(move |left| {
+                    let mut left = left.clone();
+                    left.push(u8::try_from(right).unwrap());
+                    left
+                })
             })
-            .join("\n")
-    } else {
-        generate_rust_test_loop(intrinsic, indentation, &name, PASSES)
+            .collect();
     }
+
+    specializations
 }
 
 // Top-level function to create complete test program
-pub fn create_rust_test_program<T: IntrinsicTypeDefinition>(
+pub fn create_rust_test_module<T: IntrinsicTypeDefinition>(
+    w: &mut impl std::io::Write,
     intrinsic: &dyn IntrinsicDefinition<T>,
-    target: &str,
-    notice: &str,
-    definitions: &str,
-    cfg: &str,
-) -> String {
+) -> std::io::Result<()> {
+    trace!("generating `{}`", intrinsic.name());
+    let indentation = Indentation::default();
+
+    writeln!(w, "pub fn run_{}() {{", intrinsic.name())?;
+
+    // Define the arrays of arguments.
     let arguments = intrinsic.arguments();
-    let constraints = arguments
-        .iter()
-        .filter(|i| i.has_constraint())
-        .collect_vec();
+    arguments.gen_arglists_rust(w, indentation.nested(), PASSES)?;
 
-    let indentation = Indentation::default();
-    format_rust_main_template(
-        notice,
-        definitions,
-        cfg,
-        target,
-        intrinsic
-            .arguments()
-            .gen_arglists_rust(indentation.nested(), PASSES)
-            .as_str(),
-        generate_rust_constraint_blocks(
-            intrinsic,
-            indentation.nested(),
-            &constraints,
-            Default::default(),
-        )
-        .as_str(),
-    )
+    // Enumerate the const-generic specializations, then generate the actual test loop.
+    let specializations = generate_rust_specializations(
+        &mut arguments
+            .iter()
+            .filter_map(|i| i.constraint.as_ref().map(|v| v.iter())),
+    );
+
+    generate_rust_test_loop(w, intrinsic, indentation, &specializations, PASSES)?;
+
+    writeln!(w, "}}")?;
+
+    Ok(())
 }
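`generate_rust_specializations` builds the cartesian product of the candidate values of each const-generic argument, one constraint at a time. The standalone sketch below shows that accumulation, using `u8` value lists directly instead of `Constraint` iterators and omitting the `i64` to `u8` conversion from the diff.

```rust
// Standalone sketch of the cartesian-product accumulation above.
fn specializations(constraints: &[Vec<u8>]) -> Vec<Vec<u8>> {
    let mut specializations = vec![vec![]];
    for constraint in constraints {
        // Extend every existing prefix with every candidate value of this constraint.
        specializations = constraint
            .iter()
            .flat_map(|&right| {
                specializations.iter().map(move |left| {
                    let mut left = left.clone();
                    left.push(right);
                    left
                })
            })
            .collect();
    }
    specializations
}

fn main() {
    // Two const parameters, with candidate values [0, 1] and [2, 3] respectively.
    assert_eq!(
        specializations(&[vec![0, 1], vec![2, 3]]),
        [vec![0, 2], vec![1, 2], vec![0, 3], vec![1, 3]]
    );
}
```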
diff --git a/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs b/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs
index 697f9c8754d..f5e84ca97af 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/intrinsic_helpers.rs
@@ -120,8 +120,6 @@ pub struct IntrinsicType {
     /// rows encoded in the type (e.g. uint8x8_t).
     /// A value of `None` can be assumed to be 1 though.
     pub vec_len: Option<u32>,
-
-    pub target: String,
 }
 
 impl IntrinsicType {
@@ -321,18 +319,10 @@ pub trait IntrinsicTypeDefinition: Deref<Target = IntrinsicType> {
     /// can be implemented in an `impl` block
     fn get_lane_function(&self) -> String;
 
-    /// can be implemented in an `impl` block
-    fn from_c(_s: &str, _target: &str) -> Result<Self, String>
-    where
-        Self: Sized;
-
     /// Gets a string containing the typename for this type in C format.
     /// can be directly defined in `impl` blocks
     fn c_type(&self) -> String;
 
     /// can be directly defined in `impl` blocks
     fn c_single_vector_type(&self) -> String;
-
-    /// can be defined in `impl` blocks
-    fn rust_type(&self) -> String;
 }
diff --git a/library/stdarch/crates/intrinsic-test/src/common/mod.rs b/library/stdarch/crates/intrinsic-test/src/common/mod.rs
index 5d51d3460ec..5a57c8027db 100644
--- a/library/stdarch/crates/intrinsic-test/src/common/mod.rs
+++ b/library/stdarch/crates/intrinsic-test/src/common/mod.rs
@@ -11,7 +11,6 @@ pub mod indentation;
 pub mod intrinsic;
 pub mod intrinsic_helpers;
 pub mod values;
-pub mod write_file;
 
 /// Architectures must support this trait
 /// to be successfully tested.
@@ -23,3 +22,10 @@ pub trait SupportedArchitectureTest {
     fn build_rust_file(&self) -> bool;
     fn compare_outputs(&self) -> bool;
 }
+
+pub fn chunk_info(intrinsic_count: usize) -> (usize, usize) {
+    let available_parallelism = std::thread::available_parallelism().unwrap().get();
+    let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count));
+
+    (chunk_size, intrinsic_count.div_ceil(chunk_size))
+}
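As a quick sanity check of the `chunk_info` arithmetic (the counts below are assumptions, not taken from the patch): with 1,000 intrinsics on a machine reporting 8 available threads, the work splits into chunks of 125 intrinsics across 8 chunks.

    fn chunk_info_example() -> (usize, usize) {
        let intrinsic_count: usize = 1_000; // assumed
        let available_parallelism: usize = 8; // assumed; the real code queries std::thread
        let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count));
        // chunk_size = 125, chunk count = 1_000.div_ceil(125) = 8
        (chunk_size, intrinsic_count.div_ceil(chunk_size))
    }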
diff --git a/library/stdarch/crates/intrinsic-test/src/common/write_file.rs b/library/stdarch/crates/intrinsic-test/src/common/write_file.rs
deleted file mode 100644
index 92dd70b7c57..00000000000
--- a/library/stdarch/crates/intrinsic-test/src/common/write_file.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-use super::gen_rust::{create_rust_test_program, setup_rust_file_paths};
-use super::intrinsic::IntrinsicDefinition;
-use super::intrinsic_helpers::IntrinsicTypeDefinition;
-use std::fs::File;
-use std::io::Write;
-
-pub fn write_file(filename: &String, code: String) {
-    let mut file = File::create(filename).unwrap();
-    file.write_all(code.into_bytes().as_slice()).unwrap();
-}
-
-pub fn write_rust_testfiles<T: IntrinsicTypeDefinition>(
-    intrinsics: Vec<&dyn IntrinsicDefinition<T>>,
-    rust_target: &str,
-    notice: &str,
-    definitions: &str,
-    cfg: &str,
-) -> Vec<String> {
-    let intrinsics_name_list = intrinsics
-        .iter()
-        .map(|i| i.name().clone())
-        .collect::<Vec<_>>();
-    let filename_mapping = setup_rust_file_paths(&intrinsics_name_list);
-
-    intrinsics.iter().for_each(|&i| {
-        let rust_code = create_rust_test_program(i, rust_target, notice, definitions, cfg);
-        if let Some(filename) = filename_mapping.get(&i.name()) {
-            write_file(filename, rust_code)
-        }
-    });
-
-    intrinsics_name_list
-}
diff --git a/library/stdarch/crates/stdarch-gen-arm/Cargo.toml b/library/stdarch/crates/stdarch-gen-arm/Cargo.toml
index 312019f454c..de24335a52e 100644
--- a/library/stdarch/crates/stdarch-gen-arm/Cargo.toml
+++ b/library/stdarch/crates/stdarch-gen-arm/Cargo.toml
@@ -17,6 +17,6 @@ proc-macro2 = "1.0"
 quote = "1.0"
 regex = "1.5"
 serde = { version = "1.0", features = ["derive"] }
-serde_with = "1.14"
+serde_with = { version = "3.2.0", default-features = false, features = ["macros"] }
 serde_yaml = "0.8"
 walkdir = "2.3.2"
diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs
index 40132097f5d..5076064ffcd 100644
--- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs
+++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs
@@ -156,6 +156,7 @@ fn gen_bind(in_file: String, ext_name: &str) -> io::Result<()> {
 // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen-loongarch -- {in_file}
 // ```
 
+use crate::mem::transmute;
 use super::types::*;
 "#
     ));
@@ -239,38 +240,63 @@ fn gen_bind_body(
     para_num: i32,
     target: TargetFeature,
 ) -> (String, String) {
-    let type_to_rst = |t: &str, s: bool| -> &str {
-        match (t, s) {
-            ("V16QI", _) => "v16i8",
-            ("V32QI", _) => "v32i8",
-            ("V8HI", _) => "v8i16",
-            ("V16HI", _) => "v16i16",
-            ("V4SI", _) => "v4i32",
-            ("V8SI", _) => "v8i32",
-            ("V2DI", _) => "v2i64",
-            ("V4DI", _) => "v4i64",
-            ("UV16QI", _) => "v16u8",
-            ("UV32QI", _) => "v32u8",
-            ("UV8HI", _) => "v8u16",
-            ("UV16HI", _) => "v16u16",
-            ("UV4SI", _) => "v4u32",
-            ("UV8SI", _) => "v8u32",
-            ("UV2DI", _) => "v2u64",
-            ("UV4DI", _) => "v4u64",
-            ("SI", _) => "i32",
-            ("DI", _) => "i64",
-            ("USI", _) => "u32",
-            ("UDI", _) => "u64",
-            ("V4SF", _) => "v4f32",
-            ("V8SF", _) => "v8f32",
-            ("V2DF", _) => "v2f64",
-            ("V4DF", _) => "v4f64",
-            ("UQI", _) => "u32",
-            ("QI", _) => "i32",
-            ("CVPOINTER", false) => "*const i8",
-            ("CVPOINTER", true) => "*mut i8",
-            ("HI", _) => "i32",
-            (_, _) => panic!("unknown type: {t}"),
+    enum TypeKind {
+        Vector,
+        Intrinsic,
+    }
+    use TypeKind::*;
+    let type_to_rst = |t: &str, s: bool, k: TypeKind| -> &str {
+        match (t, s, k) {
+            ("V16QI", _, Vector) => "__v16i8",
+            ("V16QI", _, Intrinsic) => "m128i",
+            ("V32QI", _, Vector) => "__v32i8",
+            ("V32QI", _, Intrinsic) => "m256i",
+            ("V8HI", _, Vector) => "__v8i16",
+            ("V8HI", _, Intrinsic) => "m128i",
+            ("V16HI", _, Vector) => "__v16i16",
+            ("V16HI", _, Intrinsic) => "m256i",
+            ("V4SI", _, Vector) => "__v4i32",
+            ("V4SI", _, Intrinsic) => "m128i",
+            ("V8SI", _, Vector) => "__v8i32",
+            ("V8SI", _, Intrinsic) => "m256i",
+            ("V2DI", _, Vector) => "__v2i64",
+            ("V2DI", _, Intrinsic) => "m128i",
+            ("V4DI", _, Vector) => "__v4i64",
+            ("V4DI", _, Intrinsic) => "m256i",
+            ("UV16QI", _, Vector) => "__v16u8",
+            ("UV16QI", _, Intrinsic) => "m128i",
+            ("UV32QI", _, Vector) => "__v32u8",
+            ("UV32QI", _, Intrinsic) => "m256i",
+            ("UV8HI", _, Vector) => "__v8u16",
+            ("UV8HI", _, Intrinsic) => "m128i",
+            ("UV16HI", _, Vector) => "__v16u16",
+            ("UV16HI", _, Intrinsic) => "m256i",
+            ("UV4SI", _, Vector) => "__v4u32",
+            ("UV4SI", _, Intrinsic) => "m128i",
+            ("UV8SI", _, Vector) => "__v8u32",
+            ("UV8SI", _, Intrinsic) => "m256i",
+            ("UV2DI", _, Vector) => "__v2u64",
+            ("UV2DI", _, Intrinsic) => "m128i",
+            ("UV4DI", _, Vector) => "__v4u64",
+            ("UV4DI", _, Intrinsic) => "m256i",
+            ("SI", _, _) => "i32",
+            ("DI", _, _) => "i64",
+            ("USI", _, _) => "u32",
+            ("UDI", _, _) => "u64",
+            ("V4SF", _, Vector) => "__v4f32",
+            ("V4SF", _, Intrinsic) => "m128",
+            ("V8SF", _, Vector) => "__v8f32",
+            ("V8SF", _, Intrinsic) => "m256",
+            ("V2DF", _, Vector) => "__v2f64",
+            ("V2DF", _, Intrinsic) => "m128d",
+            ("V4DF", _, Vector) => "__v4f64",
+            ("V4DF", _, Intrinsic) => "m256d",
+            ("UQI", _, _) => "u32",
+            ("QI", _, _) => "i32",
+            ("CVPOINTER", false, _) => "*const i8",
+            ("CVPOINTER", true, _) => "*mut i8",
+            ("HI", _, _) => "i32",
+            (_, _, _) => panic!("unknown type: {t}"),
         }
     };
 
@@ -281,27 +307,27 @@ fn gen_bind_body(
             let fn_output = if out_t.to_lowercase() == "void" {
                 String::new()
             } else {
-                format!(" -> {}", type_to_rst(out_t, is_store))
+                format!(" -> {}", type_to_rst(out_t, is_store, Vector))
             };
             let fn_inputs = match para_num {
-                1 => format!("(a: {})", type_to_rst(in_t[0], is_store)),
+                1 => format!("(a: {})", type_to_rst(in_t[0], is_store, Vector)),
                 2 => format!(
                     "(a: {}, b: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store)
+                    type_to_rst(in_t[0], is_store, Vector),
+                    type_to_rst(in_t[1], is_store, Vector)
                 ),
                 3 => format!(
                     "(a: {}, b: {}, c: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store)
+                    type_to_rst(in_t[0], is_store, Vector),
+                    type_to_rst(in_t[1], is_store, Vector),
+                    type_to_rst(in_t[2], is_store, Vector)
                 ),
                 4 => format!(
                     "(a: {}, b: {}, c: {}, d: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store),
-                    type_to_rst(in_t[3], is_store)
+                    type_to_rst(in_t[0], is_store, Vector),
+                    type_to_rst(in_t[1], is_store, Vector),
+                    type_to_rst(in_t[2], is_store, Vector),
+                    type_to_rst(in_t[3], is_store, Vector)
                 ),
                 _ => panic!("unsupported parameter number"),
             };
@@ -330,34 +356,40 @@ fn gen_bind_body(
         let fn_output = if out_t.to_lowercase() == "void" {
             String::new()
         } else {
-            format!("-> {} ", type_to_rst(out_t, is_store))
+            format!("-> {} ", type_to_rst(out_t, is_store, Intrinsic))
         };
         let mut fn_inputs = match para_num {
-            1 => format!("(a: {})", type_to_rst(in_t[0], is_store)),
+            1 => format!("(a: {})", type_to_rst(in_t[0], is_store, Intrinsic)),
             2 => format!(
                 "(a: {}, b: {})",
-                type_to_rst(in_t[0], is_store),
-                type_to_rst(in_t[1], is_store)
+                type_to_rst(in_t[0], is_store, Intrinsic),
+                type_to_rst(in_t[1], is_store, Intrinsic)
             ),
             3 => format!(
                 "(a: {}, b: {}, c: {})",
-                type_to_rst(in_t[0], is_store),
-                type_to_rst(in_t[1], is_store),
-                type_to_rst(in_t[2], is_store)
+                type_to_rst(in_t[0], is_store, Intrinsic),
+                type_to_rst(in_t[1], is_store, Intrinsic),
+                type_to_rst(in_t[2], is_store, Intrinsic)
             ),
             4 => format!(
                 "(a: {}, b: {}, c: {}, d: {})",
-                type_to_rst(in_t[0], is_store),
-                type_to_rst(in_t[1], is_store),
-                type_to_rst(in_t[2], is_store),
-                type_to_rst(in_t[3], is_store)
+                type_to_rst(in_t[0], is_store, Intrinsic),
+                type_to_rst(in_t[1], is_store, Intrinsic),
+                type_to_rst(in_t[2], is_store, Intrinsic),
+                type_to_rst(in_t[3], is_store, Intrinsic)
             ),
             _ => panic!("unsupported parameter number"),
         };
         if para_num == 1 && in_t[0] == "HI" {
             fn_inputs = match asm_fmts[1].as_str() {
-                "si13" | "i13" => format!("<const IMM_S13: {}>()", type_to_rst(in_t[0], is_store)),
-                "si10" => format!("<const IMM_S10: {}>()", type_to_rst(in_t[0], is_store)),
+                "si13" | "i13" => format!(
+                    "<const IMM_S13: {}>()",
+                    type_to_rst(in_t[0], is_store, Intrinsic)
+                ),
+                "si10" => format!(
+                    "<const IMM_S10: {}>()",
+                    type_to_rst(in_t[0], is_store, Intrinsic)
+                ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[1]),
             };
             rustc_legacy_const_generics = "rustc_legacy_const_generics(0)";
@@ -365,8 +397,8 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("ui") {
                 format!(
                     "<const IMM{2}: {1}>(a: {0})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -377,8 +409,8 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("si") {
                 format!(
                     "<const IMM_S{2}: {1}>(a: {0})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -389,8 +421,8 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("si") {
                 format!(
                     "<const IMM_S{2}: {1}>(mem_addr: {0})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -401,8 +433,8 @@ fn gen_bind_body(
             fn_inputs = match asm_fmts[2].as_str() {
                 "rk" => format!(
                     "(mem_addr: {}, b: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store)
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic)
                 ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
             };
@@ -410,9 +442,9 @@ fn gen_bind_body(
             fn_inputs = if asm_fmts[2].starts_with("ui") {
                 format!(
                     "<const IMM{3}: {2}>(a: {0}, b: {1})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic),
                     asm_fmts[2].get(2..).unwrap()
                 )
             } else {
@@ -423,9 +455,9 @@ fn gen_bind_body(
             fn_inputs = match asm_fmts[2].as_str() {
                 "si12" => format!(
                     "<const IMM_S12: {2}>(a: {0}, mem_addr: {1})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store)
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic)
                 ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
             };
@@ -434,9 +466,9 @@ fn gen_bind_body(
             fn_inputs = match asm_fmts[2].as_str() {
                 "rk" => format!(
                     "(a: {}, mem_addr: {}, b: {})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store)
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic)
                 ),
                 _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
             };
@@ -444,10 +476,10 @@ fn gen_bind_body(
             fn_inputs = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) {
                 ("si8", t) => format!(
                     "<const IMM_S8: {2}, const IMM{4}: {3}>(a: {0}, mem_addr: {1})",
-                    type_to_rst(in_t[0], is_store),
-                    type_to_rst(in_t[1], is_store),
-                    type_to_rst(in_t[2], is_store),
-                    type_to_rst(in_t[3], is_store),
+                    type_to_rst(in_t[0], is_store, Intrinsic),
+                    type_to_rst(in_t[1], is_store, Intrinsic),
+                    type_to_rst(in_t[2], is_store, Intrinsic),
+                    type_to_rst(in_t[3], is_store, Intrinsic),
                     type_to_imm(t),
                 ),
                 (_, _) => panic!(
@@ -466,10 +498,16 @@ fn gen_bind_body(
     let unsafe_end = if !is_mem { " }" } else { "" };
     let mut call_params = {
         match para_num {
-            1 => format!("{unsafe_start}__{current_name}(a){unsafe_end}"),
-            2 => format!("{unsafe_start}__{current_name}(a, b){unsafe_end}"),
-            3 => format!("{unsafe_start}__{current_name}(a, b, c){unsafe_end}"),
-            4 => format!("{unsafe_start}__{current_name}(a, b, c, d){unsafe_end}"),
+            1 => format!("{unsafe_start}transmute(__{current_name}(transmute(a))){unsafe_end}"),
+            2 => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b))){unsafe_end}"
+            ),
+            3 => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), transmute(c))){unsafe_end}"
+            ),
+            4 => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), transmute(c), transmute(d))){unsafe_end}"
+            ),
             _ => panic!("unsupported parameter number"),
         }
     };
@@ -477,12 +515,12 @@ fn gen_bind_body(
         call_params = match asm_fmts[1].as_str() {
             "si10" => {
                 format!(
-                    "static_assert_simm_bits!(IMM_S10, 10);\n    {unsafe_start}__{current_name}(IMM_S10){unsafe_end}"
+                    "static_assert_simm_bits!(IMM_S10, 10);\n    {unsafe_start}transmute(__{current_name}(IMM_S10)){unsafe_end}"
                 )
             }
             "i13" => {
                 format!(
-                    "static_assert_simm_bits!(IMM_S13, 13);\n    {unsafe_start}__{current_name}(IMM_S13){unsafe_end}"
+                    "static_assert_simm_bits!(IMM_S13, 13);\n    {unsafe_start}transmute(__{current_name}(IMM_S13)){unsafe_end}"
                 )
             }
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
@@ -490,7 +528,7 @@ fn gen_bind_body(
     } else if para_num == 2 && (in_t[1] == "UQI" || in_t[1] == "USI") {
         call_params = if asm_fmts[2].starts_with("ui") {
             format!(
-                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}__{current_name}(a, IMM{0}){unsafe_end}",
+                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), IMM{0})){unsafe_end}",
                 asm_fmts[2].get(2..).unwrap()
             )
         } else {
@@ -500,7 +538,7 @@ fn gen_bind_body(
         call_params = match asm_fmts[2].as_str() {
             "si5" => {
                 format!(
-                    "static_assert_simm_bits!(IMM_S5, 5);\n    {unsafe_start}__{current_name}(a, IMM_S5){unsafe_end}"
+                    "static_assert_simm_bits!(IMM_S5, 5);\n    {unsafe_start}transmute(__{current_name}(transmute(a), IMM_S5)){unsafe_end}"
                 )
             }
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
@@ -508,7 +546,7 @@ fn gen_bind_body(
     } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "SI" {
         call_params = if asm_fmts[2].starts_with("si") {
             format!(
-                "static_assert_simm_bits!(IMM_S{0}, {0});\n    {unsafe_start}__{current_name}(mem_addr, IMM_S{0}){unsafe_end}",
+                "static_assert_simm_bits!(IMM_S{0}, {0});\n    {unsafe_start}transmute(__{current_name}(mem_addr, IMM_S{0})){unsafe_end}",
                 asm_fmts[2].get(2..).unwrap()
             )
         } else {
@@ -516,13 +554,15 @@ fn gen_bind_body(
         }
     } else if para_num == 2 && in_t[0] == "CVPOINTER" && in_t[1] == "DI" {
         call_params = match asm_fmts[2].as_str() {
-            "rk" => format!("{unsafe_start}__{current_name}(mem_addr, b){unsafe_end}"),
+            "rk" => format!(
+                "{unsafe_start}transmute(__{current_name}(mem_addr, transmute(b))){unsafe_end}"
+            ),
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
         };
     } else if para_num == 3 && (in_t[2] == "USI" || in_t[2] == "UQI") {
         call_params = if asm_fmts[2].starts_with("ui") {
             format!(
-                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}__{current_name}(a, b, IMM{0}){unsafe_end}",
+                "static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), transmute(b), IMM{0})){unsafe_end}",
                 asm_fmts[2].get(2..).unwrap()
             )
         } else {
@@ -531,19 +571,21 @@ fn gen_bind_body(
     } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "SI" {
         call_params = match asm_fmts[2].as_str() {
             "si12" => format!(
-                "static_assert_simm_bits!(IMM_S12, 12);\n    {unsafe_start}__{current_name}(a, mem_addr, IMM_S12){unsafe_end}"
+                "static_assert_simm_bits!(IMM_S12, 12);\n    {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S12)){unsafe_end}"
             ),
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
         };
     } else if para_num == 3 && in_t[1] == "CVPOINTER" && in_t[2] == "DI" {
         call_params = match asm_fmts[2].as_str() {
-            "rk" => format!("{unsafe_start}__{current_name}(a, mem_addr, b){unsafe_end}"),
+            "rk" => format!(
+                "{unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, transmute(b))){unsafe_end}"
+            ),
             _ => panic!("unsupported assembly format: {}", asm_fmts[2]),
         };
     } else if para_num == 4 {
         call_params = match (asm_fmts[2].as_str(), current_name.chars().last().unwrap()) {
             ("si8", t) => format!(
-                "static_assert_simm_bits!(IMM_S8, 8);\n    static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}__{current_name}(a, mem_addr, IMM_S8, IMM{0}){unsafe_end}",
+                "static_assert_simm_bits!(IMM_S8, 8);\n    static_assert_uimm_bits!(IMM{0}, {0});\n    {unsafe_start}transmute(__{current_name}(transmute(a), mem_addr, IMM_S8, IMM{0})){unsafe_end}",
                 type_to_imm(t)
             ),
             (_, _) => panic!(
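The net effect of the changes above is that the generated wrappers now convert between the intrinsic-facing types (`m128i`/`m256i` and friends) and the vector types used by the raw bindings (`__v16i8`, `__v32u8`, ...) via `transmute`. A self-contained sketch of that bridging pattern, using stand-in types and a stand-in binding rather than a real LoongArch intrinsic:

    use core::mem::transmute;

    #[repr(C)]
    #[derive(Clone, Copy)]
    struct RawVec([i8; 16]); // stand-in for a `__v16i8`-style vector type

    #[repr(C)]
    #[derive(Clone, Copy)]
    struct PubVec([i8; 16]); // stand-in for an `m128i`-style intrinsic type

    // Stand-in for a raw binding, which operates on the vector type.
    fn __raw_add(a: RawVec, b: RawVec) -> RawVec {
        RawVec(core::array::from_fn(|i| a.0[i].wrapping_add(b.0[i])))
    }

    // Public wrapper: exposes the intrinsic type and bridges with `transmute`,
    // mirroring the `transmute(__name(transmute(a), transmute(b)))` pattern above.
    pub fn pub_add(a: PubVec, b: PubVec) -> PubVec {
        unsafe { transmute(__raw_add(transmute(a), transmute(b))) }
    }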
diff --git a/library/stdarch/examples/connect5.rs b/library/stdarch/examples/connect5.rs
index 371b28552b3..f24657b1483 100644
--- a/library/stdarch/examples/connect5.rs
+++ b/library/stdarch/examples/connect5.rs
@@ -563,11 +563,7 @@ fn search(pos: &Pos, alpha: i32, beta: i32, depth: i32, _ply: i32) -> i32 {
     assert!(bs >= -EVAL_INF && bs <= EVAL_INF);
 
     //best move at the root node, best score elsewhere
-    if _ply == 0 {
-        bm
-    } else {
-        bs
-    }
+    if _ply == 0 { bm } else { bs }
 }
 
 /// Evaluation function: give different scores to different patterns after a fixed depth.
diff --git a/library/stdarch/rust-version b/library/stdarch/rust-version
index 5102178848e..1ced6098acf 100644
--- a/library/stdarch/rust-version
+++ b/library/stdarch/rust-version
@@ -1 +1 @@
-040e2f8b9ff2d76fbe2146d6003e297ed4532088
+32e7a4b92b109c24e9822c862a7c74436b50e564
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
index 8840714a662..1b3f9e2564c 100644
--- a/library/test/src/cli.rs
+++ b/library/test/src/cli.rs
@@ -162,18 +162,17 @@ tests whose names contain the filter are run. Multiple filter strings may
 be passed, which will run all tests matching any of the filters.
 
 By default, all tests are run in parallel. This can be altered with the
---test-threads flag or the RUST_TEST_THREADS environment variable when running
-tests (set it to 1).
+--test-threads flag when running tests (set it to 1).
 
-By default, the tests are run in alphabetical order. Use --shuffle or set
-RUST_TEST_SHUFFLE to run the tests in random order. Pass the generated
-"shuffle seed" to --shuffle-seed (or set RUST_TEST_SHUFFLE_SEED) to run the
-tests in the same order again. Note that --shuffle and --shuffle-seed do not
-affect whether the tests are run in parallel.
+By default, the tests are run in alphabetical order. Use --shuffle to run
+the tests in random order. Pass the generated "shuffle seed" to
+--shuffle-seed to run the tests in the same order again. Note that
+--shuffle and --shuffle-seed do not affect whether the tests are run in
+parallel.
 
 All tests have their standard output and standard error captured by default.
-This can be overridden with the --no-capture flag or setting RUST_TEST_NOCAPTURE
-environment variable to a value other than "0". Logging is not captured by default.
+This can be overridden with the --no-capture flag. Logging is not captured
+by default.
+This can be overridden with the --no-capture flag. Logging is not captured
 
 Test Attributes:
 
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
index 8f29f1dada5..13b2b3d502c 100644
--- a/library/test/src/console.rs
+++ b/library/test/src/console.rs
@@ -281,23 +281,15 @@ fn on_test_event(
     Ok(())
 }
 
-/// A simple console test runner.
-/// Runs provided tests reporting process and results to the stdout.
-pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
+pub(crate) fn get_formatter(opts: &TestOpts, max_name_len: usize) -> Box<dyn OutputFormatter> {
     let output = match term::stdout() {
         None => OutputLocation::Raw(io::stdout()),
         Some(t) => OutputLocation::Pretty(t),
     };
 
-    let max_name_len = tests
-        .iter()
-        .max_by_key(|t| len_if_padded(t))
-        .map(|t| t.desc.name.as_slice().len())
-        .unwrap_or(0);
-
     let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
 
-    let mut out: Box<dyn OutputFormatter> = match opts.format {
+    match opts.format {
         OutputFormat::Pretty => Box::new(PrettyFormatter::new(
             output,
             opts.use_color(),
@@ -310,7 +302,19 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
         }
         OutputFormat::Json => Box::new(JsonFormatter::new(output)),
         OutputFormat::Junit => Box::new(JunitFormatter::new(output)),
-    };
+    }
+}
+
+/// A simple console test runner.
+/// Runs provided tests reporting process and results to the stdout.
+pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
+    let max_name_len = tests
+        .iter()
+        .max_by_key(|t| len_if_padded(t))
+        .map(|t| t.desc.name.as_slice().len())
+        .unwrap_or(0);
+
+    let mut out = get_formatter(opts, max_name_len);
     let mut st = ConsoleTestState::new(opts)?;
 
     // Prevent the usage of `Instant` in some cases:
diff --git a/library/test/src/formatters/json.rs b/library/test/src/formatters/json.rs
index 92c1c0716f1..4a101f00d74 100644
--- a/library/test/src/formatters/json.rs
+++ b/library/test/src/formatters/json.rs
@@ -215,6 +215,17 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
 
         Ok(state.failed == 0)
     }
+
+    fn write_merged_doctests_times(
+        &mut self,
+        total_time: f64,
+        compilation_time: f64,
+    ) -> io::Result<()> {
+        let newline = "\n";
+        self.writeln_message(&format!(
+            r#"{{ "type": "report", "total_time": {total_time}, "compilation_time": {compilation_time} }}{newline}"#,
+        ))
+    }
 }
 
 /// A formatting utility used to print strings with characters in need of escaping.
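With illustrative timings (values assumed), the extra JSON object emitted by `write_merged_doctests_times` above is a single line in the existing event format:

    { "type": "report", "total_time": 3.21, "compilation_time": 1.47 }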
diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs
index 84153a9d05b..1566f1cb1da 100644
--- a/library/test/src/formatters/junit.rs
+++ b/library/test/src/formatters/junit.rs
@@ -182,6 +182,16 @@ impl<T: Write> OutputFormatter for JunitFormatter<T> {
 
         Ok(state.failed == 0)
     }
+
+    fn write_merged_doctests_times(
+        &mut self,
+        total_time: f64,
+        compilation_time: f64,
+    ) -> io::Result<()> {
+        self.write_message(&format!(
+            "<report total_time=\"{total_time}\" compilation_time=\"{compilation_time}\"></report>\n",
+        ))
+    }
 }
 
 fn parse_class_name(desc: &TestDesc) -> (String, String) {
diff --git a/library/test/src/formatters/mod.rs b/library/test/src/formatters/mod.rs
index f1225fecfef..c97cdb16a50 100644
--- a/library/test/src/formatters/mod.rs
+++ b/library/test/src/formatters/mod.rs
@@ -33,6 +33,11 @@ pub(crate) trait OutputFormatter {
         state: &ConsoleTestState,
     ) -> io::Result<()>;
     fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool>;
+    fn write_merged_doctests_times(
+        &mut self,
+        total_time: f64,
+        compilation_time: f64,
+    ) -> io::Result<()>;
 }
 
 pub(crate) fn write_stderr_delimiter(test_output: &mut Vec<u8>, test_name: &TestName) {
diff --git a/library/test/src/formatters/pretty.rs b/library/test/src/formatters/pretty.rs
index bf3fc40db41..5836138644a 100644
--- a/library/test/src/formatters/pretty.rs
+++ b/library/test/src/formatters/pretty.rs
@@ -303,4 +303,14 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
 
         Ok(success)
     }
+
+    fn write_merged_doctests_times(
+        &mut self,
+        total_time: f64,
+        compilation_time: f64,
+    ) -> io::Result<()> {
+        self.write_plain(format!(
+            "all doctests ran in {total_time:.2}s; merged doctests compilation took {compilation_time:.2}s\n",
+        ))
+    }
 }
diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs
index b28120ab56e..0720f06e174 100644
--- a/library/test/src/formatters/terse.rs
+++ b/library/test/src/formatters/terse.rs
@@ -295,4 +295,14 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
 
         Ok(success)
     }
+
+    fn write_merged_doctests_times(
+        &mut self,
+        total_time: f64,
+        compilation_time: f64,
+    ) -> io::Result<()> {
+        self.write_plain(format!(
+            "all doctests ran in {total_time:.2}s; merged doctests compilation took {compilation_time:.2}s\n",
+        ))
+    }
 }
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 1190bb56b97..d554807bbde 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -244,6 +244,21 @@ fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
     }
 }
 
+/// Public API used by rustdoc to display the `total` and `compilation` times in the expected
+/// format.
+pub fn print_merged_doctests_times(args: &[String], total_time: f64, compilation_time: f64) {
+    let opts = match cli::parse_opts(args) {
+        Some(Ok(o)) => o,
+        Some(Err(msg)) => {
+            eprintln!("error: {msg}");
+            process::exit(ERROR_EXIT_CODE);
+        }
+        None => return,
+    };
+    let mut formatter = console::get_formatter(&opts, 0);
+    formatter.write_merged_doctests_times(total_time, compilation_time).unwrap();
+}
+
 /// Invoked when unit tests terminate. Returns `Result::Err` if the test is
 /// considered a failure. By default, invokes `report()` and checks for a `0`
 /// result.